index | repo_id | file_path | content | __index_level_0__
---|---|---|---|---|
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkWordCountHBaseIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.hbase.HBaseSourceTarget;
import org.apache.crunch.io.hbase.HBaseTypes;
import org.apache.crunch.test.TemporaryPath;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat;
import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import com.google.common.base.Joiner;
public class SparkWordCountHBaseIT {
static class StringifyFn extends MapFn<Pair<ImmutableBytesWritable, Pair<Result, Result>>, String> {
@Override
public String map(Pair<ImmutableBytesWritable, Pair<Result, Result>> input) {
byte[] firstStrBytes = input.second().first().getValue(WORD_COLFAM, null);
byte[] secondStrBytes = input.second().second().getValue(WORD_COLFAM, null);
if (firstStrBytes != null && secondStrBytes != null) {
return Joiner.on(',').join(new String(firstStrBytes, Charset.forName("UTF-8")),
new String(secondStrBytes, Charset.forName("UTF-8")));
}
return "";
}
}
@Rule
public TemporaryPath tmpDir = new TemporaryPath();
private static final byte[] COUNTS_COLFAM = Bytes.toBytes("cf");
private static final byte[] WORD_COLFAM = Bytes.toBytes("cf");
private HBaseTestingUtility hbaseTestUtil;
@SuppressWarnings("serial")
public static PTable<String, Long> wordCount(PTable<ImmutableBytesWritable, Result> words) {
return words.parallelDo(
new DoFn<Pair<ImmutableBytesWritable, Result>, String>() {
@Override
public void process(Pair<ImmutableBytesWritable, Result> row, Emitter<String> emitter) {
byte[] word = row.second().getValue(WORD_COLFAM, null);
if (word != null) {
emitter.emit(Bytes.toString(word));
}
}
}, words.getTypeFamily().strings()).count();
}
@SuppressWarnings("serial")
public static PCollection<Delete> clearCounts(PTable<ImmutableBytesWritable, Result> counts) {
return counts.parallelDo("convert to delete", new DoFn<Pair<ImmutableBytesWritable, Result>, Delete>() {
@Override
public void process(Pair<ImmutableBytesWritable, Result> input, Emitter<Delete> emitter) {
Delete delete = new Delete(input.first().get());
emitter.emit(delete);
}
}, HBaseTypes.deletes());
}
@Before
public void setUp() throws Exception {
Configuration conf = HBaseConfiguration.create(tmpDir.getDefaultConfiguration());
conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, tmpDir.getFile("hbase-staging").getAbsolutePath());
hbaseTestUtil = new HBaseTestingUtility(conf);
hbaseTestUtil.startMiniCluster();
}
@Test
public void testWordCount() throws Exception {
run(new SparkPipeline("local", "hbaseWordCount",
SparkWordCountHBaseIT.class, hbaseTestUtil.getConfiguration()));
}
@Test
public void testWordCountCustomFormat() throws Exception {
run(new SparkPipeline("local", "hbaseWordCountCustom",
SparkWordCountHBaseIT.class, hbaseTestUtil.getConfiguration()), MyTableInputFormat.class);
assertTrue(MyTableInputFormat.CONSTRUCTED.get());
}
@After
public void tearDown() throws Exception {
hbaseTestUtil.shutdownMiniCluster();
}
public void run(Pipeline pipeline) throws Exception {
run(pipeline, null);
}
public void run(Pipeline pipeline, Class<? extends MultiTableInputFormatBase> clazz) throws Exception {
Random rand = new Random();
int postFix = rand.nextInt() & 0x7FFFFFFF;
TableName inputTableName = TableName.valueOf("crunch_words_" + postFix);
TableName outputTableName = TableName.valueOf("crunch_counts_" + postFix);
TableName otherTableName = TableName.valueOf("crunch_other_" + postFix);
Table inputTable = hbaseTestUtil.createTable(inputTableName, WORD_COLFAM);
int key = 0;
key = put(inputTable, key, "cat");
key = put(inputTable, key, "cat");
key = put(inputTable, key, "dog");
inputTable.close();
// Set up the source using two scans that simply cut the rows in half.
Scan scan = new Scan();
scan.addFamily(WORD_COLFAM);
byte[] cutoffPoint = Bytes.toBytes(2);
scan.setStopRow(cutoffPoint);
Scan scan2 = new Scan();
scan2.addFamily(WORD_COLFAM);
scan2.setStartRow(cutoffPoint);
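// Informally: the row keys written below are 4-byte big-endian ints (0, 1, 2), so the first scan
// covers rows 0 and 1 ("cat", "cat") up to the exclusive stop row 2, and the second scan covers
// row 2 ("dog") onward.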
HBaseSourceTarget source = null;
if (clazz == null) {
source = new HBaseSourceTarget(inputTableName, scan, scan2);
} else {
source = new HBaseSourceTarget(inputTableName, clazz, new Scan[]{scan, scan2});
}
PTable<ImmutableBytesWritable, Result> words = pipeline.read(source);
PTable<String, Long> counts = wordCount(words);
Map<String, Long> countMap = counts.materializeToMap();
assertEquals(2, countMap.size());
assertEquals(2L, countMap.get("cat").longValue());
assertEquals(1L, countMap.get("dog").longValue());
pipeline.done();
}
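// Helper: stores the value under WORD_COLFAM with a null qualifier at the given integer row key
// and returns the next key to use.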
protected int put(Table table, int key, String value) throws IOException {
Put put = new Put(Bytes.toBytes(key));
put.addColumn(WORD_COLFAM, null, Bytes.toBytes(value));
table.put(put);
return key + 1;
}
public static class MyTableInputFormat extends MultiTableInputFormat {
public static final AtomicBoolean CONSTRUCTED = new AtomicBoolean();
public MyTableInputFormat() {
CONSTRUCTED.set(true);
}
}
}
| 2,200 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkEmptyPCollectionIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.Iterables;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.writable.Writables;
import org.junit.Rule;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class SparkEmptyPCollectionIT {
private static class SplitFn extends DoFn<String, Pair<String, Long>> {
@Override
public void process(String input, Emitter<Pair<String, Long>> emitter) {
for (String word : input.split("\\s+")) {
emitter.emit(Pair.of(word, 1L));
}
}
}
@Rule
public TemporaryPath tempDir = new TemporaryPath();
@Test
public void testEmptyMR() throws Exception {
Pipeline p = new SparkPipeline("local", "empty");
assertTrue(Iterables.isEmpty(p.emptyPCollection(Writables.strings())
.parallelDo(new SplitFn(), Writables.tableOf(Writables.strings(), Writables.longs()))
.groupByKey()
.combineValues(Aggregators.SUM_LONGS())
.materialize()));
p.done();
}
@Test
public void testUnionWithEmptyMR() throws Exception {
Pipeline p = new SparkPipeline("local", "empty");
assertFalse(Iterables.isEmpty(p.emptyPCollection(Writables.strings())
.parallelDo(new SplitFn(), Writables.tableOf(Writables.strings(), Writables.longs()))
.union(
p.read(From.textFile(tempDir.copyResourceFileName("shakes.txt")))
.parallelDo(new SplitFn(), Writables.tableOf(Writables.strings(), Writables.longs())))
.groupByKey()
.combineValues(Aggregators.SUM_LONGS())
.materialize()));
p.done();
}
@Test
public void testUnionTableWithEmptyMR() throws Exception {
Pipeline p = new SparkPipeline("local", "empty");
assertFalse(Iterables.isEmpty(p.emptyPTable(Writables.tableOf(Writables.strings(), Writables.longs()))
.union(
p.read(From.textFile(tempDir.copyResourceFileName("shakes.txt")))
.parallelDo(new SplitFn(), Writables.tableOf(Writables.strings(), Writables.longs())))
.groupByKey()
.combineValues(Aggregators.SUM_LONGS())
.materialize()));
p.done();
}
}
| 2,201 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SkipPTypesIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.io.seq.SeqFileTableSourceTarget;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.junit.Rule;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class SkipPTypesIT {
@Rule
public TemporaryPath tempDir = new TemporaryPath();
PTableType<Text, LongWritable> ptt = Writables.tableOf(Writables.writables(Text.class),
Writables.writables(LongWritable.class));
@Test
public void testSkipPTypes() throws Exception {
String out = tempDir.getFileName("out");
SparkPipeline pipeline = new SparkPipeline("local", "skipptypes");
PCollection<String> shakes = pipeline.read(From.textFile(tempDir.copyResourceFileName("shakes.txt")));
PTable<String, Long> wcnt = shakes.count();
wcnt.write(new MySeqFileTableSourceTarget(out, ptt));
pipeline.run();
PTable<Text, LongWritable> wcntIn = pipeline.read(new MySeqFileTableSourceTarget(out, ptt));
assertEquals(new LongWritable(1L), wcntIn.materialize().iterator().next().second());
pipeline.done();
}
static class ToWritables extends MapFn<Pair<String, Long>, Pair<Text, LongWritable>> {
@Override
public Pair<Text, LongWritable> map(Pair<String, Long> input) {
return Pair.of(new Text(input.first()), new LongWritable(input.second()));
}
}
static class MySeqFileTableSourceTarget extends SeqFileTableSourceTarget {
public MySeqFileTableSourceTarget(String path, PTableType ptype) {
super(path, ptype);
}
@Override
public Converter getConverter() {
return new SkipPTypesConverter(getType().getConverter());
}
}
static class SkipPTypesConverter implements Converter {
private Converter delegate;
public SkipPTypesConverter(Converter delegate) {
this.delegate = delegate;
}
@Override
public Object convertInput(Object key, Object value) {
return delegate.convertInput(key, value);
}
@Override
public Object convertIterableInput(Object key, Iterable value) {
return delegate.convertIterableInput(key, value);
}
@Override
public Object outputKey(Object value) {
return delegate.outputKey(value);
}
@Override
public Object outputValue(Object value) {
return delegate.outputValue(value);
}
@Override
public Class getKeyClass() {
return delegate.getKeyClass();
}
@Override
public Class getValueClass() {
return delegate.getValueClass();
}
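// Presumably the crux of this test: returning false tells Crunch not to apply the PType's
// input/output transforms around this converter, so records pass through as raw Writables to
// the delegate.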
@Override
public boolean applyPTypeTransforms() {
return false;
}
}
}
| 2,202 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkSortIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.Lists;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.lib.Sort;
import org.apache.crunch.test.StringWrapper;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.avro.Avros;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import static org.apache.crunch.lib.Sort.ColumnOrder.by;
import static org.apache.crunch.lib.Sort.Order.ASCENDING;
import static org.apache.crunch.lib.Sort.Order.DESCENDING;
import static org.apache.crunch.test.StringWrapper.wrap;
import static org.junit.Assert.assertEquals;
public class SparkSortIT implements Serializable {
@Rule
public transient TemporaryPath tmpDir = new TemporaryPath();
@Test
public void testWritableSortAsc() throws Exception {
runSingle(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), Sort.Order.ASCENDING,
"A\tand this text as well");
}
@Test
public void testWritableSortDesc() throws Exception {
runSingle(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), Sort.Order.DESCENDING,
"B\tthis doc has some text");
}
@Test
public void testWritableSortAscDesc() throws Exception {
runPair(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), by(1, ASCENDING), by(2, DESCENDING), "A",
"this doc has this text");
}
@Test
public void testWritableSortSecondDescFirstAsc() throws Exception {
runPair(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), by(2, DESCENDING), by(1, ASCENDING), "A",
"this doc has this text");
}
@Test
public void testWritableSortTripleAscDescAsc() throws Exception {
runTriple(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), by(1, ASCENDING), by(2, DESCENDING),
by(3, ASCENDING), "A", "this", "doc");
}
@Test
public void testWritableSortQuadAscDescAscDesc() throws Exception {
runQuad(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), by(1, ASCENDING), by(2, DESCENDING),
by(3, ASCENDING), by(4, DESCENDING), "A", "this", "doc", "has");
}
@Test
public void testWritableSortTupleNAscDesc() throws Exception {
runTupleN(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(),
new Sort.ColumnOrder[] { by(1, ASCENDING), by(2, DESCENDING) }, new String[] { "A", "this doc has this text" });
}
@Test
public void testWritableSortTable() throws Exception {
runTable(new SparkPipeline("local", "sort"), WritableTypeFamily.getInstance(), "A");
}
@Test
public void testAvroSortAsc() throws Exception {
runSingle(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), Sort.Order.ASCENDING, "A\tand this text as well");
}
@Test
public void testAvroSortDesc() throws Exception {
runSingle(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), Sort.Order.DESCENDING, "B\tthis doc has some text");
}
@Test
public void testAvroSortPairAscDesc() throws Exception {
runPair(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), by(1, ASCENDING), by(2, DESCENDING), "A",
"this doc has this text");
}
@Test
public void testAvroSortPairSecondDescFirstAsc() throws Exception {
runPair(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), by(2, DESCENDING), by(1, ASCENDING), "A",
"this doc has this text");
}
@Test
public void testAvroSortTripleAscDescAsc() throws Exception {
runTriple(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), by(1, ASCENDING), by(2, DESCENDING),
by(3, ASCENDING), "A", "this", "doc");
}
@Test
public void testAvroSortQuadAscDescAscDesc() throws Exception {
runQuad(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), by(1, ASCENDING), by(2, DESCENDING),
by(3, ASCENDING), by(4, DESCENDING), "A", "this", "doc", "has");
}
@Test
public void testAvroSortTupleNAscDesc() throws Exception {
runTupleN(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(),
new Sort.ColumnOrder[] { by(1, ASCENDING), by(2, DESCENDING) }, new String[] { "A", "this doc has this text" });
}
@Test
public void testAvroReflectSortPair() throws IOException {
Pipeline pipeline = new SparkPipeline("local", "sort");
pipeline.enableDebug();
String rsrc = tmpDir.copyResourceFileName("set2.txt");
PCollection<Pair<String, StringWrapper>> in = pipeline.readTextFile(rsrc)
.parallelDo(new MapFn<String, Pair<String, StringWrapper>>() {
@Override
public Pair<String, StringWrapper> map(String input) {
return Pair.of(input, wrap(input));
}
}, Avros.pairs(Avros.strings(), Avros.reflects(StringWrapper.class)));
PCollection<Pair<String, StringWrapper>> sorted = Sort.sort(in, Sort.Order.ASCENDING);
List<Pair<String, StringWrapper>> expected = Lists.newArrayList();
expected.add(Pair.of("a", wrap("a")));
expected.add(Pair.of("c", wrap("c")));
expected.add(Pair.of("d", wrap("d")));
assertEquals(expected, Lists.newArrayList(sorted.materialize()));
pipeline.done();
}
@Test
public void testAvroReflectSortTable() throws IOException {
Pipeline pipeline = new SparkPipeline("local", "sort");
PTable<String, StringWrapper> unsorted = pipeline.readTextFile(tmpDir.copyResourceFileName("set2.txt")).parallelDo(
new MapFn<String, Pair<String, StringWrapper>>() {
@Override
public Pair<String, StringWrapper> map(String input) {
return Pair.of(input, wrap(input));
}
}, Avros.tableOf(Avros.strings(), Avros.reflects(StringWrapper.class)));
PTable<String, StringWrapper> sorted = Sort.sort(unsorted);
List<Pair<String, StringWrapper>> expected = Lists.newArrayList();
expected.add(Pair.of("a", wrap("a")));
expected.add(Pair.of("c", wrap("c")));
expected.add(Pair.of("d", wrap("d")));
assertEquals(expected, Lists.newArrayList(sorted.materialize()));
pipeline.done();
}
@Test
public void testAvroSortTable() throws Exception {
runTable(new SparkPipeline("local", "sort"), AvroTypeFamily.getInstance(), "A");
}
private void runSingle(Pipeline pipeline, PTypeFamily typeFamily, Sort.Order order, String firstLine) throws IOException {
String inputPath = tmpDir.copyResourceFileName("docs.txt");
PCollection<String> input = pipeline.readTextFile(inputPath);
// The following converts the input from Writables to the required type family.
PCollection<String> input2 = input.parallelDo(new DoFn<String, String>() {
@Override
public void process(String input, Emitter<String> emitter) {
emitter.emit(input);
}
}, typeFamily.strings());
PCollection<String> sorted = Sort.sort(input2, order);
Iterable<String> lines = sorted.materialize();
assertEquals(firstLine, lines.iterator().next());
pipeline.done(); // TODO: finally
}
private void runPair(Pipeline pipeline, PTypeFamily typeFamily, Sort.ColumnOrder first, Sort.ColumnOrder second,
String firstField, String secondField) throws IOException {
String inputPath = tmpDir.copyResourceFileName("docs.txt");
PCollection<String> input = pipeline.readTextFile(inputPath);
PTable<String, String> kv = input.parallelDo(new DoFn<String, Pair<String, String>>() {
@Override
public void process(String input, Emitter<Pair<String, String>> emitter) {
String[] split = input.split("[\t]+");
emitter.emit(Pair.of(split[0], split[1]));
}
}, typeFamily.tableOf(typeFamily.strings(), typeFamily.strings()));
PCollection<Pair<String, String>> sorted = Sort.sortPairs(kv, first, second);
List<Pair<String, String>> lines = Lists.newArrayList(sorted.materialize());
Pair<String, String> l = lines.iterator().next();
assertEquals(firstField, l.first());
assertEquals(secondField, l.second());
pipeline.done();
}
private void runTriple(Pipeline pipeline, PTypeFamily typeFamily, Sort.ColumnOrder first, Sort.ColumnOrder second,
Sort.ColumnOrder third, String firstField, String secondField, String thirdField) throws IOException {
String inputPath = tmpDir.copyResourceFileName("docs.txt");
PCollection<String> input = pipeline.readTextFile(inputPath);
PCollection<Tuple3<String, String, String>> kv = input.parallelDo(
new DoFn<String, Tuple3<String, String, String>>() {
@Override
public void process(String input, Emitter<Tuple3<String, String, String>> emitter) {
String[] split = input.split("[\t ]+");
int len = split.length;
emitter.emit(Tuple3.of(split[0], split[1 % len], split[2 % len]));
}
}, typeFamily.triples(typeFamily.strings(), typeFamily.strings(), typeFamily.strings()));
PCollection<Tuple3<String, String, String>> sorted = Sort.sortTriples(kv, first, second, third);
List<Tuple3<String, String, String>> lines = Lists.newArrayList(sorted.materialize());
Tuple3<String, String, String> l = lines.iterator().next();
assertEquals(firstField, l.first());
assertEquals(secondField, l.second());
assertEquals(thirdField, l.third());
pipeline.done();
}
private void runQuad(Pipeline pipeline, PTypeFamily typeFamily, Sort.ColumnOrder first, Sort.ColumnOrder second,
Sort.ColumnOrder third, Sort.ColumnOrder fourth, String firstField, String secondField, String thirdField,
String fourthField) throws IOException {
String inputPath = tmpDir.copyResourceFileName("docs.txt");
PCollection<String> input = pipeline.readTextFile(inputPath);
PCollection<Tuple4<String, String, String, String>> kv = input.parallelDo(
new DoFn<String, Tuple4<String, String, String, String>>() {
@Override
public void process(String input, Emitter<Tuple4<String, String, String, String>> emitter) {
String[] split = input.split("[\t ]+");
int len = split.length;
emitter.emit(Tuple4.of(split[0], split[1 % len], split[2 % len], split[3 % len]));
}
}, typeFamily.quads(typeFamily.strings(), typeFamily.strings(), typeFamily.strings(), typeFamily.strings()));
PCollection<Tuple4<String, String, String, String>> sorted = Sort.sortQuads(kv, first, second, third, fourth);
Iterable<Tuple4<String, String, String, String>> lines = sorted.materialize();
Tuple4<String, String, String, String> l = lines.iterator().next();
assertEquals(firstField, l.first());
assertEquals(secondField, l.second());
assertEquals(thirdField, l.third());
assertEquals(fourthField, l.fourth());
pipeline.done();
}
private void runTupleN(Pipeline pipeline, PTypeFamily typeFamily, Sort.ColumnOrder[] orders, String[] fields)
throws IOException {
String inputPath = tmpDir.copyResourceFileName("docs.txt");
PCollection<String> input = pipeline.readTextFile(inputPath);
PType[] types = new PType[orders.length];
Arrays.fill(types, typeFamily.strings());
PCollection<TupleN> kv = input.parallelDo(new DoFn<String, TupleN>() {
@Override
public void process(String input, Emitter<TupleN> emitter) {
String[] split = input.split("[\t]+");
emitter.emit(new TupleN(split));
}
}, typeFamily.tuples(types));
PCollection<TupleN> sorted = Sort.sortTuples(kv, orders);
Iterable<TupleN> lines = sorted.materialize();
TupleN l = lines.iterator().next();
int i = 0;
for (String field : fields) {
assertEquals(field, l.get(i++));
}
pipeline.done();
}
private void runTable(Pipeline pipeline, PTypeFamily typeFamily, String firstKey) throws IOException {
String inputPath = tmpDir.copyResourceFileName("docs.txt");
PCollection<String> input = pipeline.readTextFile(inputPath);
PTable<String, String> table = input.parallelDo(new DoFn<String, Pair<String, String>>() {
@Override
public void process(String input, Emitter<Pair<String, String>> emitter) {
String[] split = input.split("[\t]+");
emitter.emit(Pair.of(split[0], split[1]));
}
}, typeFamily.tableOf(typeFamily.strings(), typeFamily.strings()));
PTable<String, String> sorted = Sort.sort(table);
Iterable<Pair<String, String>> lines = sorted.materialize();
Pair<String, String> l = lines.iterator().next();
assertEquals(firstKey, l.first());
pipeline.done();
}
}
| 2,203 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkAggregatorIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.avro.Avros;
import org.junit.Rule;
import org.junit.Test;
import java.util.Collection;
import static org.junit.Assert.assertEquals;
public class SparkAggregatorIT {
@Rule
public TemporaryPath tempDir = new TemporaryPath();
@Test
public void testCount() throws Exception {
SparkPipeline pipeline = new SparkPipeline("local", "aggregator");
PCollection<String> set1 = pipeline.read(From.textFile(tempDir.copyResourceFileName("set1.txt")));
PCollection<String> set2 = pipeline.read(From.textFile(tempDir.copyResourceFileName("set2.txt")));
Iterable<Pair<Integer, Long>> cnts = set1.union(set2)
.parallelDo(new CntFn(), Avros.ints())
.count().materialize();
assertEquals(ImmutableList.of(Pair.of(1, 7L)), Lists.newArrayList(cnts));
pipeline.done();
}
@Test
public void testAvroFirstN() throws Exception {
SparkPipeline pipeline = new SparkPipeline("local", "aggregator");
PCollection<String> set1 = pipeline.read(From.textFile(tempDir.copyResourceFileName("set1.txt"), Avros.strings()));
PCollection<String> set2 = pipeline.read(From.textFile(tempDir.copyResourceFileName("set2.txt"), Avros.strings()));
Aggregator<String> first5 = Aggregators.FIRST_N(5);
Collection<String> aggregate = set1.union(set2).aggregate(first5).asCollection().getValue();
pipeline.done();
assertEquals(5, aggregate.size());
}
private static class CntFn extends MapFn<String, Integer> {
@Override
public Integer map(String input) {
return 1;
}
}
}
| 2,204 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkTaskAttemptIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.io.To;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.avro.Avros;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
public class SparkTaskAttemptIT {
@Rule
public TemporaryPath tempDir = new TemporaryPath();
private SparkPipeline pipeline;
@Before
public void setUp() throws IOException {
pipeline = new SparkPipeline("local", "taskattempt");
}
@After
public void tearDown() throws Exception {
pipeline.done();
}
@Test
public void testTaskAttempts() throws Exception {
String inputPath = tempDir.copyResourceFileName("set1.txt");
String inputPath2 = tempDir.copyResourceFileName("set2.txt");
PCollection<String> first = pipeline.read(From.textFile(inputPath));
PCollection<String> second = pipeline.read(From.textFile(inputPath2));
Iterable<Pair<Integer, Long>> cnts = first.union(second)
.parallelDo(new TaskMapFn(), Avros.ints())
.count()
.materialize();
assertEquals(ImmutableSet.of(Pair.of(0, 4L), Pair.of(1, 3L)), Sets.newHashSet(cnts));
}
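// TaskMapFn tags each line with the id of the task attempt that processed it; set1.txt (4 lines)
// and set2.txt (3 lines) presumably land in separate tasks, giving the (0, 4L) and (1, 3L)
// counts asserted above.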
private static class TaskMapFn extends MapFn<String, Integer> {
@Override
public Integer map(String input) {
return getContext().getTaskAttemptID().getTaskID().getId();
}
}
}
| 2,205 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SmallCollectionLengthTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import org.apache.crunch.PCollection;
import org.apache.crunch.PObject;
import org.apache.crunch.Pipeline;
import org.apache.crunch.types.avro.Avros;
import org.junit.Test;
import java.io.Serializable;
import java.util.ArrayList;
import javax.annotation.Nullable;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
public class SmallCollectionLengthTest implements Serializable {
@Test
public void smallCollectionsShouldNotHaveNullLength() throws Exception {
Pipeline p = new SparkPipeline("local", "foobar");
final ImmutableList<String>
allFruits =
ImmutableList.of("apelsin", "banan", "citron", "daddel");
final ArrayList<ImmutableList<String>> fruitLists = new ArrayList<>();
for (int i = 0; i <= allFruits.size(); ++i) {
fruitLists.add(ImmutableList.copyOf(allFruits.subList(0, i)));
}
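// fruitLists now holds every prefix of allFruits, from the empty list up to all four fruits,
// so the zero-element case that motivates this test is exercised as well.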
final ArrayList<PObject<Long>> results = new ArrayList<>();
for (ImmutableList<String> fruit : fruitLists) {
final PCollection<String> collection = p.create(fruit, Avros.strings());
results.add(collection.length());
}
p.run();
final Iterable<Long>
lengths =
Iterables.transform(results, new Function<PObject<Long>, Long>() {
@Nullable
@Override
public Long apply(@Nullable PObject<Long> input) {
return input.getValue();
}
});
for (Long length : lengths) {
assertThat(length, not(nullValue()));
}
p.done();
}
}
| 2,206 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkMapsideJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.Lists;
import org.apache.crunch.fn.FilterFns;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.lib.join.JoinStrategy;
import org.apache.crunch.lib.join.JoinType;
import org.apache.crunch.lib.join.MapsideJoinStrategy;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.writable.Writables;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class SparkMapsideJoinIT {
private static String saveTempDir;
@BeforeClass
public static void setUpClass(){
// Ensure a consistent temporary directory for use of the DistributedCache.
// The DistributedCache technically isn't supported when running in local mode, and the default
// temporary directory "/tmp" is used as its location. This typically only causes an issue when
// running integration tests on Mac OS X, as OS X doesn't use "/tmp" as its default temporary
// directory. The following call ensures that "/tmp" is used as the temporary directory on all platforms.
saveTempDir = System.setProperty("java.io.tmpdir", "/tmp");
}
@AfterClass
public static void tearDownClass(){
System.setProperty("java.io.tmpdir", saveTempDir);
}
private static class LineSplitter extends MapFn<String, Pair<Integer, String>> {
@Override
public Pair<Integer, String> map(String input) {
String[] fields = input.split("\\|");
return Pair.of(Integer.parseInt(fields[0]), fields[1]);
}
}
private static class CapOrdersFn extends MapFn<String, String> {
@Override
public String map(String v) {
return v.toUpperCase(Locale.ENGLISH);
}
}
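// ConcatValuesFn relies on Pair.toString() rendering as "[first,second]", which is the format
// the expected join results below are written against (e.g. "[John Doe,Corn flakes]").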
private static class ConcatValuesFn extends MapFn<Pair<String, String>, String> {
@Override
public String map(Pair<String, String> v) {
return v.toString();
}
}
@Rule
public TemporaryPath tmpDir = new TemporaryPath();
@Test
public void testMapsideJoin_RightSideIsEmpty() throws IOException {
Pipeline pipeline = new SparkPipeline("local", "mapside");
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
PTable<Integer, String> filteredOrderTable = orderTable
.parallelDo(FilterFns.<Pair<Integer, String>>REJECT_ALL(), orderTable.getPTableType());
JoinStrategy<Integer, String, String> mapsideJoin = new MapsideJoinStrategy<Integer, String, String>();
PTable<Integer, Pair<String, String>> joined = mapsideJoin.join(customerTable, filteredOrderTable, JoinType.INNER_JOIN);
List<Pair<Integer, Pair<String, String>>> materializedJoin = Lists.newArrayList(joined.materialize());
assertTrue(materializedJoin.isEmpty());
pipeline.done();
}
@Test
public void testMapsideJoin() throws IOException {
runMapsideJoin(new SparkPipeline("local", "mapside"), false);
}
@Test
public void testMapsideJoin_Materialized() throws IOException {
runMapsideJoin(new SparkPipeline("local", "mapside"), true);
}
@Test
public void testMapsideJoin_LeftOuterJoin() throws IOException {
runMapsideLeftOuterJoin(new SparkPipeline("local", "mapside"), false);
}
@Test
public void testMapsideJoin_LeftOuterJoin_Materialized() throws IOException {
runMapsideLeftOuterJoin(new SparkPipeline("local", "mapside"), true);
}
private void runMapsideJoin(Pipeline pipeline, boolean materialize) {
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
JoinStrategy<Integer, String, String> mapsideJoin = new MapsideJoinStrategy<Integer, String, String>(materialize);
PTable<Integer, String> custOrders = mapsideJoin.join(customerTable, orderTable, JoinType.INNER_JOIN)
.mapValues("concat", new ConcatValuesFn(), Writables.strings());
PTable<Integer, String> ORDER_TABLE = orderTable.mapValues(new CapOrdersFn(), orderTable.getValueType());
PTable<Integer, Pair<String, String>> joined = mapsideJoin.join(custOrders, ORDER_TABLE, JoinType.INNER_JOIN);
List<Pair<Integer, Pair<String, String>>> expectedJoinResult = Lists.newArrayList();
expectedJoinResult.add(Pair.of(111, Pair.of("[John Doe,Corn flakes]", "CORN FLAKES")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(333, Pair.of("[Someone Else,Toilet brush]", "TOILET BRUSH")));
Iterable<Pair<Integer, Pair<String, String>>> iter = joined.materialize();
List<Pair<Integer, Pair<String, String>>> joinedResultList = Lists.newArrayList(iter);
Collections.sort(joinedResultList);
assertEquals(expectedJoinResult, joinedResultList);
pipeline.done();
}
private void runMapsideLeftOuterJoin(Pipeline pipeline, boolean materialize) {
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
JoinStrategy<Integer, String, String> mapsideJoin = new MapsideJoinStrategy<Integer, String, String>(materialize);
PTable<Integer, String> custOrders = mapsideJoin.join(customerTable, orderTable, JoinType.LEFT_OUTER_JOIN)
.mapValues("concat", new ConcatValuesFn(), Writables.strings());
PTable<Integer, String> ORDER_TABLE = orderTable.mapValues(new CapOrdersFn(), orderTable.getValueType());
PTable<Integer, Pair<String, String>> joined = mapsideJoin.join(custOrders, ORDER_TABLE, JoinType.LEFT_OUTER_JOIN);
List<Pair<Integer, Pair<String, String>>> expectedJoinResult = Lists.newArrayList();
expectedJoinResult.add(Pair.of(111, Pair.of("[John Doe,Corn flakes]", "CORN FLAKES")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(333, Pair.of("[Someone Else,Toilet brush]", "TOILET BRUSH")));
expectedJoinResult.add(Pair.of(444, Pair.<String,String>of("[Has No Orders,null]", null)));
Iterable<Pair<Integer, Pair<String, String>>> iter = joined.materialize();
List<Pair<Integer, Pair<String, String>>> joinedResultList = Lists.newArrayList(iter);
Collections.sort(joinedResultList);
assertEquals(expectedJoinResult, joinedResultList);
pipeline.done();
}
private PTable<Integer, String> readTable(Pipeline pipeline, String filename) {
try {
return pipeline.readTextFile(tmpDir.copyResourceFileName(filename)).parallelDo("asTable",
new LineSplitter(),
Writables.tableOf(Writables.ints(), Writables.strings()));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 2,207 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkUnionResultsIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.At;
import org.apache.crunch.io.To;
import org.apache.crunch.lib.PTables;
import org.apache.crunch.test.CrunchTestSupport;
import org.apache.crunch.types.writable.Writables;
import org.junit.Test;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import java.util.Set;
import static org.junit.Assert.assertEquals;
public class SparkUnionResultsIT extends CrunchTestSupport implements Serializable {
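// Note: despite its name, StringLengthMapFn pairs every input line with the constant 10L; it
// also bumps the ("my", "counter") counter that testMultiGroupBy asserts against.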
static class StringLengthMapFn extends MapFn<String, Pair<String, Long>> {
@Override
public Pair<String, Long> map(String input) {
increment("my", "counter");
return new Pair<String, Long>(input, 10L);
}
}
/**
* Tests combining a GBK output with a map-only job output into a single
* unioned collection.
*/
@Test
public void testUnionOfGroupedOutputAndNonGroupedOutput() throws IOException {
String inputPath = tempDir.copyResourceFileName("set1.txt");
String inputPath2 = tempDir.copyResourceFileName("set2.txt");
Pipeline pipeline = new SparkPipeline("local", "unionresults");
PCollection<String> set1Lines = pipeline.read(At.textFile(inputPath, Writables.strings()));
PCollection<Pair<String, Long>> set1Lengths = set1Lines.parallelDo(new StringLengthMapFn(),
Writables.pairs(Writables.strings(), Writables.longs()));
PCollection<Pair<String, Long>> set2Counts = pipeline.read(At.textFile(inputPath2, Writables.strings())).count();
PCollection<Pair<String, Long>> union = set1Lengths.union(set2Counts);
Set<Pair<String, Long>> unionValues = Sets.newHashSet(union.materialize());
assertEquals(7, unionValues.size());
Set<Pair<String, Long>> expectedPairs = Sets.newHashSet();
expectedPairs.add(Pair.of("b", 10L));
expectedPairs.add(Pair.of("c", 10L));
expectedPairs.add(Pair.of("a", 10L));
expectedPairs.add(Pair.of("e", 10L));
expectedPairs.add(Pair.of("a", 1L));
expectedPairs.add(Pair.of("c", 1L));
expectedPairs.add(Pair.of("d", 1L));
assertEquals(expectedPairs, unionValues);
pipeline.done();
}
@Test
public void testMultiGroupBy() throws Exception {
String inputPath = tempDir.copyResourceFileName("set1.txt");
String inputPath2 = tempDir.copyResourceFileName("set2.txt");
String output = tempDir.getFileName("output");
Pipeline pipeline = new SparkPipeline("local", "multigroupby");
PCollection<String> set1Lines = pipeline.read(At.textFile(inputPath, Writables.strings()));
PCollection<Pair<String, Long>> set1Lengths = set1Lines.parallelDo(new StringLengthMapFn(),
Writables.pairs(Writables.strings(), Writables.longs()));
PTable<String, Long> set2Counts = pipeline.read(At.textFile(inputPath2, Writables.strings())).count();
PTables.asPTable(set2Counts.union(set1Lengths)).groupByKey().ungroup()
.write(At.sequenceFile(output, Writables.strings(), Writables.longs()));
PipelineResult res = pipeline.done();
assertEquals(4, res.getStageResults().get(0).getCounterValue("my", "counter"));
}
@Test
public void testMultiWrite() throws Exception {
String inputPath = tempDir.copyResourceFileName("set1.txt");
String inputPath2 = tempDir.copyResourceFileName("set2.txt");
String output = tempDir.getFileName("output");
Pipeline pipeline = new SparkPipeline("local", "multiwrite");
PCollection<String> set1Lines = pipeline.read(At.textFile(inputPath, Writables.strings()));
PTable<String, Long> set1Lengths = set1Lines.parallelDo(new StringLengthMapFn(),
Writables.tableOf(Writables.strings(), Writables.longs()));
PTable<String, Long> set2Counts = pipeline.read(At.textFile(inputPath2, Writables.strings())).count();
TableSourceTarget<String, Long> inter = At.sequenceFile(output, Writables.strings(), Writables.longs());
set1Lengths.write(inter);
set2Counts.write(inter, Target.WriteMode.APPEND);
pipeline.run();
PTable<String, Long> in = pipeline.read(inter);
Set<Pair<String, Long>> values = Sets.newHashSet(in.materialize());
assertEquals(7, values.size());
Set<Pair<String, Long>> expectedPairs = Sets.newHashSet();
expectedPairs.add(Pair.of("b", 10L));
expectedPairs.add(Pair.of("c", 10L));
expectedPairs.add(Pair.of("a", 10L));
expectedPairs.add(Pair.of("e", 10L));
expectedPairs.add(Pair.of("a", 1L));
expectedPairs.add(Pair.of("c", 1L));
expectedPairs.add(Pair.of("d", 1L));
assertEquals(expectedPairs, values);
pipeline.done();
}
}
| 2,208 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkCogroupIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.lib.Cogroup;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.test.Tests;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
public class SparkCogroupIT {
@Rule
public TemporaryPath tmpDir = new TemporaryPath();
private SparkPipeline pipeline;
private PCollection<String> lines1;
private PCollection<String> lines2;
private PCollection<String> lines3;
private PCollection<String> lines4;
@Before
public void setUp() throws IOException {
pipeline = new SparkPipeline("local", "wordcount");
lines1 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src1.txt")));
lines2 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src2.txt")));
lines3 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src1.txt")));
lines4 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src2.txt")));
}
@After
public void tearDown() {
pipeline.done();
}
@Test
public void testCogroupWritables() {
runCogroup(WritableTypeFamily.getInstance());
}
@Test
public void testCogroupAvro() {
runCogroup(AvroTypeFamily.getInstance());
}
@Test
public void testCogroup3Writables() {
runCogroup3(WritableTypeFamily.getInstance());
}
@Test
public void testCogroup3Avro() {
runCogroup3(AvroTypeFamily.getInstance());
}
@Test
public void testCogroup4Writables() {
runCogroup4(WritableTypeFamily.getInstance());
}
@Test
public void testCogroup4Avro() {
runCogroup4(AvroTypeFamily.getInstance());
}
public void runCogroup(PTypeFamily ptf) {
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, Pair<Collection<String>, Collection<String>>> cg = Cogroup.cogroup(kv1, kv2);
Map<String, Pair<Collection<String>, Collection<String>>> result = cg.materializeToMap();
Map<String, Pair<Collection<String>, Collection<String>>> actual = Maps.newHashMap();
for (Map.Entry<String, Pair<Collection<String>, Collection<String>>> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf(e.getValue().first());
Collection<String> two = ImmutableSet.copyOf(e.getValue().second());
actual.put(e.getKey(), Pair.of(one, two));
}
Map<String, Pair<Collection<String>, Collection<String>>> expected = ImmutableMap.of(
"a", Pair.of(coll("1-1", "1-4"), coll()),
"b", Pair.of(coll("1-2"), coll("2-1")),
"c", Pair.of(coll("1-3"), coll("2-2", "2-3")),
"d", Pair.of(coll(), coll("2-4"))
);
assertThat(actual, is(expected));
}
public void runCogroup3(PTypeFamily ptf) {
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, String> kv3 = lines3.parallelDo("kv3", new KeyValueSplit(), tt);
PTable<String, Tuple3.Collect<String, String, String>> cg = Cogroup.cogroup(kv1, kv2, kv3);
Map<String, Tuple3.Collect<String, String, String>> result = cg.materializeToMap();
Map<String, Tuple3.Collect<String, String, String>> actual = Maps.newHashMap();
for (Map.Entry<String, Tuple3.Collect<String, String, String>> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf(e.getValue().first());
Collection<String> two = ImmutableSet.copyOf(e.getValue().second());
Collection<String> three = ImmutableSet.copyOf(e.getValue().third());
actual.put(e.getKey(), new Tuple3.Collect<String, String, String>(one, two, three));
}
Map<String, Tuple3.Collect<String, String, String>> expected = ImmutableMap.of(
"a", new Tuple3.Collect<String, String, String>(coll("1-1", "1-4"), coll(), coll("1-1", "1-4")),
"b", new Tuple3.Collect<String, String, String>(coll("1-2"), coll("2-1"), coll("1-2")),
"c", new Tuple3.Collect<String, String, String>(coll("1-3"), coll("2-2", "2-3"), coll("1-3")),
"d", new Tuple3.Collect<String, String, String>(coll(), coll("2-4"), coll())
);
assertThat(actual, is(expected));
}
public void runCogroup4(PTypeFamily ptf) {
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, String> kv3 = lines3.parallelDo("kv3", new KeyValueSplit(), tt);
PTable<String, String> kv4 = lines4.parallelDo("kv4", new KeyValueSplit(), tt);
PTable<String, Tuple4.Collect<String, String, String, String>> cg = Cogroup.cogroup(kv1, kv2, kv3, kv4);
Map<String, Tuple4.Collect<String, String, String, String>> result = cg.materializeToMap();
Map<String, Tuple4.Collect<String, String, String, String>> actual = Maps.newHashMap();
for (Map.Entry<String, Tuple4.Collect<String, String, String, String>> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf(e.getValue().first());
Collection<String> two = ImmutableSet.copyOf(e.getValue().second());
Collection<String> three = ImmutableSet.copyOf(e.getValue().third());
Collection<String> four = ImmutableSet.copyOf(e.getValue().fourth());
actual.put(e.getKey(), new Tuple4.Collect<String, String, String, String>(one, two, three, four));
}
Map<String, Tuple4.Collect<String, String, String, String>> expected = ImmutableMap.of(
"a", new Tuple4.Collect<String, String, String, String>(coll("1-1", "1-4"), coll(), coll("1-1", "1-4"), coll()),
"b", new Tuple4.Collect<String, String, String, String>(coll("1-2"), coll("2-1"), coll("1-2"), coll("2-1")),
"c", new Tuple4.Collect<String, String, String, String>(coll("1-3"), coll("2-2", "2-3"), coll("1-3"), coll("2-2", "2-3")),
"d", new Tuple4.Collect<String, String, String, String>(coll(), coll("2-4"), coll(), coll("2-4"))
);
assertThat(actual, is(expected));
}
private static class KeyValueSplit extends DoFn<String, Pair<String, String>> {
@Override
public void process(String input, Emitter<Pair<String, String>> emitter) {
String[] fields = input.split(",");
emitter.emit(Pair.of(fields[0], fields[1]));
}
}
private static Collection<String> coll(String... values) {
return ImmutableSet.copyOf(values);
}
}
| 2,209 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkTfidfIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.Collection;
import java.util.List;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.seq.SeqFileSourceTarget;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.lib.Join;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.hadoop.fs.Path;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
@SuppressWarnings("serial")
public class SparkTfidfIT implements Serializable {
@Rule
public transient TemporaryPath tmpDir = new TemporaryPath();
// total number of documents; ideally this would be calculated from the input rather than hard-coded
protected static final double N = 2;
private transient Pipeline pipeline;
@Before
public void setUp() throws Exception {
pipeline = new SparkPipeline("local", "tfidf");
}
@Test
public void testWritablesSingleRun() throws IOException {
run(pipeline, WritableTypeFamily.getInstance(), true);
}
@Test
public void testWritablesMultiRun() throws IOException {
run(pipeline, WritableTypeFamily.getInstance(), false);
}
/**
* Generates a TF-IDF score for each (word, document) pair in the input.
*/
public PTable<String, Collection<Pair<String, Double>>> generateTFIDF(PCollection<String> docs, Path termFreqPath,
PTypeFamily ptf) throws IOException {
/*
* Input: String Input title text
*
* Output: PTable<Pair<String, String>, Long> Pair<Pair<word, title>, count
* in title>
*/
PTable<Pair<String, String>, Long> tf = Aggregate.count(docs.parallelDo("term document frequency",
new DoFn<String, Pair<String, String>>() {
@Override
public void process(String doc, Emitter<Pair<String, String>> emitter) {
String[] kv = doc.split("\t");
String title = kv[0];
String text = kv[1];
for (String word : text.split("\\W+")) {
if (word.length() > 0) {
Pair<String, String> pair = Pair.of(word.toLowerCase(), title);
emitter.emit(pair);
}
}
}
}, ptf.pairs(ptf.strings(), ptf.strings())));
tf.write(new SeqFileSourceTarget<Pair<Pair<String, String>, Long>>(termFreqPath, tf.getPType()));
/*
* Input: Pair<Pair<String, String>, Long> Pair<Pair<word, title>, count in
* title>
*
* Output: PTable<String, Long> PTable<word, # of docs containing word>
*/
PTable<String, Long> n = Aggregate.count(tf.parallelDo("little n (# of docs contain word)",
new DoFn<Pair<Pair<String, String>, Long>, String>() {
@Override
public void process(Pair<Pair<String, String>, Long> input, Emitter<String> emitter) {
emitter.emit(input.first().first());
}
}, ptf.strings()));
/*
* Input: Pair<Pair<String, String>, Long> Pair<Pair<word, title>, count in
* title>
*
* Output: PTable<String, Collection<Pair<String, Long>>> PTable<word,
* Collection<Pair<title, count in title>>>
*/
PTable<String, Collection<Pair<String, Long>>> wordDocumentCountPair = tf.parallelDo(
"transform wordDocumentPairCount",
new DoFn<Pair<Pair<String, String>, Long>, Pair<String, Collection<Pair<String, Long>>>>() {
Collection<Pair<String, Long>> buffer;
String key;
@Override
public void process(Pair<Pair<String, String>, Long> input,
Emitter<Pair<String, Collection<Pair<String, Long>>>> emitter) {
Pair<String, String> wordDocumentPair = input.first();
if (!wordDocumentPair.first().equals(key)) {
flush(emitter);
key = wordDocumentPair.first();
buffer = Lists.newArrayList();
}
buffer.add(Pair.of(wordDocumentPair.second(), input.second()));
}
protected void flush(Emitter<Pair<String, Collection<Pair<String, Long>>>> emitter) {
if (buffer != null) {
emitter.emit(Pair.of(key, buffer));
buffer = null;
}
}
@Override
public void cleanup(Emitter<Pair<String, Collection<Pair<String, Long>>>> emitter) {
flush(emitter);
}
}, ptf.tableOf(ptf.strings(), ptf.collections(ptf.pairs(ptf.strings(), ptf.longs()))));
PTable<String, Pair<Long, Collection<Pair<String, Long>>>> joinedResults = Join.join(n, wordDocumentCountPair);
/*
* Input: Pair<String, Pair<Long, Collection<Pair<String, Long>>>> Pair<word,
* Pair<# of docs containing word, Collection<Pair<title, term frequency>>>
*
* Output: Pair<String, Collection<Pair<String, Double>>> Pair<word,
* Collection<Pair<title, tfidf>>>
*/
return joinedResults
.mapValues(
new MapFn<Pair<Long, Collection<Pair<String, Long>>>, Collection<Pair<String, Double>>>() {
@Override
public Collection<Pair<String, Double>> map(
Pair<Long, Collection<Pair<String, Long>>> input) {
Collection<Pair<String, Double>> tfidfs = Lists.newArrayList();
double n = input.first();
double idf = Math.log(N / n);
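// Worked example: with N = 2 documents, a word that appears in only one
// document gets idf = ln(2/1) = 0.6931..., which is the value asserted
// against the written output below.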
for (Pair<String, Long> tf : input.second()) {
double tfidf = tf.second() * idf;
tfidfs.add(Pair.of(tf.first(), tfidf));
}
return tfidfs;
}
}, ptf.collections(ptf.pairs(ptf.strings(), ptf.doubles())));
}
public void run(Pipeline pipeline, PTypeFamily typeFamily, boolean singleRun) throws IOException {
String inputFile = tmpDir.copyResourceFileName("docs.txt");
String outputPath1 = tmpDir.getFileName("output1");
String outputPath2 = tmpDir.getFileName("output2");
Path tfPath = tmpDir.getPath("termfreq");
PCollection<String> docs = pipeline.readTextFile(inputFile);
PTable<String, Collection<Pair<String, Double>>> results = generateTFIDF(docs, tfPath, typeFamily);
pipeline.writeTextFile(results, outputPath1);
if (!singleRun) {
pipeline.run();
}
PTable<String, Collection<Pair<String, Double>>> uppercased = results.mapKeys(
new MapFn<String, String>() {
@Override
public String map(String k1) {
return k1.toUpperCase();
}
}, results.getKeyType());
pipeline.writeTextFile(uppercased, outputPath2);
pipeline.done();
// Check the lowercase version...
File outputFile = new File(outputPath1, "part-r-00000");
List<String> lines = Files.readLines(outputFile, Charset.defaultCharset());
boolean passed = false;
for (String line : lines) {
if (line.startsWith("[the") && line.contains("B,0.6931471805599453")) {
passed = true;
break;
}
}
assertTrue(passed);
// ...and the uppercase version
outputFile = new File(outputPath2, "part-r-00000");
lines = Files.readLines(outputFile, Charset.defaultCharset());
passed = false;
for (String line : lines) {
if (line.startsWith("[THE") && line.contains("B,0.6931471805599453")) {
passed = true;
break;
}
}
assertTrue(passed);
}
}
| 2,210 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkPageRankIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.PTypes;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.avro.Avros;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import java.util.Collection;
import java.util.List;
import static org.junit.Assert.assertEquals;
public class SparkPageRankIT {
private static List<String> URLS = ImmutableList.of(
"www.A.com www.B.com",
"www.A.com www.C.com",
"www.A.com www.D.com",
"www.A.com www.E.com",
"www.B.com www.D.com",
"www.B.com www.E.com",
"www.C.com www.D.com",
"www.D.com www.B.com",
"www.E.com www.A.com",
"www.F.com www.B.com",
"www.F.com www.C.com");
public static class PageRankData {
public float score;
public float lastScore;
public List<String> urls;
public PageRankData() {
}
public PageRankData(float score, float lastScore, Iterable<String> urls) {
this.score = score;
this.lastScore = lastScore;
this.urls = Lists.newArrayList(urls);
}
public PageRankData next(float newScore) {
return new PageRankData(newScore, score, urls);
}
public float propagatedScore() {
return score / urls.size();
}
@Override
public String toString() {
return score + " " + lastScore + " " + urls;
}
}
@Rule
public TemporaryPath tmpDir = new TemporaryPath();
private Pipeline pipeline;
@Before
public void setUp() throws Exception {
pipeline = new SparkPipeline("local", "pagerank");
}
@Test
public void testAvroReflects() throws Exception {
PTypeFamily tf = AvroTypeFamily.getInstance();
PType<PageRankData> prType = Avros.reflects(PageRankData.class);
run(pipeline, prType, tf);
pipeline.done();
}
@Test
public void testWritablesJSON() throws Exception {
PTypeFamily tf = WritableTypeFamily.getInstance();
PType<PageRankData> prType = PTypes.jsonString(PageRankData.class, tf);
run(pipeline, prType, tf);
pipeline.done();
}
public static PTable<String, PageRankData> pageRank(PTable<String, PageRankData> input, final float d) {
PTypeFamily ptf = input.getTypeFamily();
PTable<String, Float> outbound = input.parallelDo(new DoFn<Pair<String, PageRankData>, Pair<String, Float>>() {
@Override
public void process(Pair<String, PageRankData> input, Emitter<Pair<String, Float>> emitter) {
PageRankData prd = input.second();
for (String link : prd.urls) {
emitter.emit(Pair.of(link, prd.propagatedScore()));
}
}
}, ptf.tableOf(ptf.strings(), ptf.floats()));
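// Cogroup each page's current state with the scores propagated to it and
// apply the damped update newScore = d + (1 - d) * sum, a simplified form
// of the standard PageRank update.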
return input.cogroup(outbound).mapValues(
new MapFn<Pair<Collection<PageRankData>, Collection<Float>>, PageRankData>() {
@Override
public PageRankData map(Pair<Collection<PageRankData>, Collection<Float>> input) {
PageRankData prd = Iterables.getOnlyElement(input.first());
Collection<Float> propagatedScores = input.second();
float sum = 0.0f;
for (Float s : propagatedScores) {
sum += s;
}
return prd.next(d + (1.0f - d) * sum);
}
}, input.getValueType());
}
public static void run(Pipeline pipeline,
PType<PageRankData> prType, PTypeFamily ptf) throws Exception {
PTable<String, PageRankData> scores = pipeline.create(URLS, ptf.strings())
.parallelDo(new MapFn<String, Pair<String, String>>() {
@Override
public Pair<String, String> map(String input) {
String[] urls = input.split("\\s+");
return Pair.of(urls[0], urls[1]);
}
}, ptf.tableOf(ptf.strings(), ptf.strings())).groupByKey()
.mapValues(new MapFn<Iterable<String>, PageRankData>() {
@Override
public PageRankData map(Iterable<String> input) {
return new PageRankData(1.0f, 0.0f, input);
}
}, prType);
Float delta = 1.0f;
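// Iterate until the largest per-page change in score between successive
// iterations drops below the 0.01 convergence threshold.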
while (delta > 0.01) {
scores = pageRank(scores, 0.5f).cache();
delta = Aggregate.max(scores.parallelDo(new MapFn<Pair<String, PageRankData>, Float>() {
@Override
public Float map(Pair<String, PageRankData> input) {
PageRankData prd = input.second();
return Math.abs(prd.score - prd.lastScore);
}
}, ptf.floats())).getValue();
}
assertEquals(0.0048, delta, 0.001);
pipeline.done();
}
}
| 2,211 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkHFileTargetIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Resources;
import org.apache.commons.io.IOUtils;
import org.apache.crunch.fn.FilterFns;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.At;
import org.apache.crunch.io.hbase.HBaseTypes;
import org.apache.crunch.io.hbase.HFileUtils;
import org.apache.crunch.io.hbase.ToHBase;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.regionserver.KeyValueHeap;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.crunch.types.writable.Writables.nulls;
import static org.apache.crunch.types.writable.Writables.tableOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class SparkHFileTargetIT implements Serializable {
private static HBaseTestingUtility HBASE_TEST_UTILITY;
private static final byte[] TEST_FAMILY = Bytes.toBytes("test_family");
private static final byte[] TEST_QUALIFIER = Bytes.toBytes("count");
private static final Path TEMP_DIR = new Path("/tmp");
private static final Random RANDOM = new Random();
private static final FilterFn<String> SHORT_WORD_FILTER = new FilterFn<String>() {
@Override
public boolean accept(String input) {
return input.length() <= 2;
}
};
@Rule
public transient TemporaryPath tmpDir = new TemporaryPath(RuntimeParameters.TMP_DIR, "hadoop.tmp.dir");
@BeforeClass
public static void setUpClass() throws Exception {
// We have to use a mini MapReduce cluster, because LocalJobRunner allows only a single reducer
// (we need multiple reducers to test bulk load against multiple regions).
Configuration conf = HBaseConfiguration.create();
// Workaround for HBASE-5711, we need to set config value dfs.datanode.data.dir.perm
// equal to the permissions of the temp dirs on the filesystem. These temp dirs were
// probably created using this process' umask. So we guess the temp dir permissions as
// 0777 & ~umask, and use that to set the config value.
Process process = Runtime.getRuntime().exec("/bin/sh -c umask");
BufferedReader br = new BufferedReader(new InputStreamReader(process.getInputStream(), Charset.forName("UTF-8")));
int rc = process.waitFor();
if(rc == 0) {
String umask = br.readLine();
int umaskBits = Integer.parseInt(umask, 8);
int permBits = 0777 & ~umaskBits;
String perms = Integer.toString(permBits, 8);
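// For example, a umask of 0022 yields permBits = 0777 & ~0022 = 0755.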
conf.set("dfs.datanode.data.dir.perm", perms);
}
HBASE_TEST_UTILITY = new HBaseTestingUtility(conf);
HBASE_TEST_UTILITY.startMiniCluster(1);
}
private static Table createTable(int splits) throws Exception {
HColumnDescriptor hcol = new HColumnDescriptor(TEST_FAMILY);
return createTable(splits, hcol);
}
private static Table createTable(int splits, HColumnDescriptor... hcols) throws Exception {
TableName tableName = TableName.valueOf(Bytes.toBytes("test_table_" + RANDOM.nextInt(1000000000)));
HTableDescriptor htable = new HTableDescriptor(tableName);
for (HColumnDescriptor hcol : hcols) {
htable.addFamily(hcol);
}
return HBASE_TEST_UTILITY.createTable(htable,
Bytes.split(Bytes.toBytes("a"), Bytes.toBytes("z"), splits));
}
@AfterClass
public static void tearDownClass() throws Exception {
HBASE_TEST_UTILITY.shutdownMiniCluster();
}
@Before
public void setUp() throws IOException {
FileSystem fs = HBASE_TEST_UTILITY.getTestFileSystem();
fs.delete(TEMP_DIR, true);
}
@Test
public void testHFileTarget() throws Exception {
Pipeline pipeline = new SparkPipeline("local", "hfile",
SparkHFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration());
Path inputPath = copyResourceFileToHDFS("shakes.txt");
Path outputPath = getTempPathOnHDFS("out");
PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings()));
PCollection<String> words = split(shakespeare, "\\s+");
PTable<String, Long> wordCounts = words.count();
pipeline.write(convertToKeyValues(wordCounts), ToHBase.hfile(outputPath));
PipelineResult result = pipeline.run();
assertTrue(result.succeeded());
FileSystem fs = FileSystem.get(HBASE_TEST_UTILITY.getConfiguration());
KeyValue kv = readFromHFiles(fs, outputPath, "and");
assertEquals(375L, Bytes.toLong(CellUtil.cloneValue(kv)));
pipeline.done();
}
@Test
public void testBulkLoad() throws Exception {
Pipeline pipeline = new SparkPipeline("local", "hfile",
SparkHFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration());
Path inputPath = copyResourceFileToHDFS("shakes.txt");
Path outputPath = getTempPathOnHDFS("out");
byte[] columnFamilyA = Bytes.toBytes("colfamA");
byte[] columnFamilyB = Bytes.toBytes("colfamB");
Admin admin = HBASE_TEST_UTILITY.getAdmin();
Table testTable = createTable(26, new HColumnDescriptor(columnFamilyA), new HColumnDescriptor(columnFamilyB));
Connection connection = admin.getConnection();
RegionLocator regionLocator = connection.getRegionLocator(testTable.getName());
PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings()));
PCollection<String> words = split(shakespeare, "\\s+");
PTable<String,Long> wordCounts = words.count();
PCollection<Put> wordCountPuts = convertToPuts(wordCounts, columnFamilyA, columnFamilyB);
HFileUtils.writePutsToHFilesForIncrementalLoad(
wordCountPuts,
admin.getConnection(),
testTable.getName(),
outputPath);
PipelineResult result = pipeline.run();
assertTrue(result.succeeded());
new LoadIncrementalHFiles(HBASE_TEST_UTILITY.getConfiguration())
.doBulkLoad(outputPath, admin, testTable, regionLocator);
Map<String, Long> EXPECTED = ImmutableMap.<String, Long>builder()
.put("__EMPTY__", 1345L)
.put("the", 528L)
.put("and", 375L)
.put("I", 314L)
.put("of", 314L)
.build();
for (Map.Entry<String, Long> e : EXPECTED.entrySet()) {
assertEquals((long) e.getValue(), getWordCountFromTable(testTable, columnFamilyA, e.getKey()));
assertEquals((long) e.getValue(), getWordCountFromTable(testTable, columnFamilyB, e.getKey()));
}
pipeline.done();
}
/** See CRUNCH-251 */
@Test
public void testMultipleHFileTargets() throws Exception {
Pipeline pipeline = new SparkPipeline("local", "hfile",
SparkHFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration());
Path inputPath = copyResourceFileToHDFS("shakes.txt");
Path outputPath1 = getTempPathOnHDFS("out1");
Path outputPath2 = getTempPathOnHDFS("out2");
Admin admin = HBASE_TEST_UTILITY.getAdmin();
Table table1 = createTable(26);
Table table2 = createTable(26);
Connection connection = admin.getConnection();
RegionLocator regionLocator1 = connection.getRegionLocator(table1.getName());
RegionLocator regionLocator2 = connection.getRegionLocator(table2.getName());
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(HBASE_TEST_UTILITY.getConfiguration());
PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings()));
PCollection<String> words = split(shakespeare, "\\s+");
PCollection<String> shortWords = words.filter(SHORT_WORD_FILTER);
PCollection<String> longWords = words.filter(FilterFns.not(SHORT_WORD_FILTER));
PTable<String, Long> shortWordCounts = shortWords.count();
PTable<String, Long> longWordCounts = longWords.count();
HFileUtils.writePutsToHFilesForIncrementalLoad(
convertToPuts(shortWordCounts),
connection,
table1.getName(),
outputPath1);
HFileUtils.writePutsToHFilesForIncrementalLoad(
convertToPuts(longWordCounts),
connection,
table2.getName(),
outputPath2);
PipelineResult result = pipeline.run();
assertTrue(result.succeeded());
loader.doBulkLoad(outputPath1, admin, table1, regionLocator1);
loader.doBulkLoad(outputPath2, admin, table2, regionLocator2);
assertEquals(314L, getWordCountFromTable(table1, "of"));
assertEquals(375L, getWordCountFromTable(table2, "and"));
pipeline.done();
}
@Test
public void testHFileUsesFamilyConfig() throws Exception {
DataBlockEncoding newBlockEncoding = DataBlockEncoding.PREFIX;
assertNotSame(newBlockEncoding, DataBlockEncoding.valueOf(HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING));
Pipeline pipeline = new SparkPipeline("local", "hfile",
SparkHFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration());
Path inputPath = copyResourceFileToHDFS("shakes.txt");
Path outputPath = getTempPathOnHDFS("out");
Admin admin = HBASE_TEST_UTILITY.getAdmin();
HColumnDescriptor hcol = new HColumnDescriptor(TEST_FAMILY);
hcol.setDataBlockEncoding(newBlockEncoding);
Table testTable = createTable(26, hcol);
Connection connection = admin.getConnection();
PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings()));
PCollection<String> words = split(shakespeare, "\\s+");
PTable<String,Long> wordCounts = words.count();
PCollection<Put> wordCountPuts = convertToPuts(wordCounts);
HFileUtils.writePutsToHFilesForIncrementalLoad(
wordCountPuts,
connection,
testTable.getName(),
outputPath);
PipelineResult result = pipeline.run();
assertTrue(result.succeeded());
int hfilesCount = 0;
Configuration conf = HBASE_TEST_UTILITY.getConfiguration();
FileSystem fs = outputPath.getFileSystem(conf);
for (FileStatus e : fs.listStatus(new Path(outputPath, Bytes.toString(TEST_FAMILY)))) {
Path f = e.getPath();
if (!f.getName().startsWith("part-")) { // filter out "_SUCCESS"
continue;
}
HFile.Reader reader = null;
try {
reader = HFile.createReader(fs, f, new CacheConfig(conf), true, conf);
assertEquals(DataBlockEncoding.PREFIX, reader.getDataBlockEncoding());
} finally {
if (reader != null) {
reader.close();
}
}
hfilesCount++;
}
assertTrue(hfilesCount > 0);
pipeline.done();
}
private static PCollection<Put> convertToPuts(PTable<String, Long> in) {
return convertToPuts(in, TEST_FAMILY);
}
private static PCollection<Put> convertToPuts(PTable<String, Long> in, final byte[]...columnFamilies) {
return in.parallelDo(new MapFn<Pair<String, Long>, Put>() {
@Override
public Put map(Pair<String, Long> input) {
String w = input.first();
if (w.length() == 0) {
w = "__EMPTY__";
}
long c = input.second();
Put p = new Put(Bytes.toBytes(w));
for (byte[] columnFamily : columnFamilies) {
p.addColumn(columnFamily, TEST_QUALIFIER, Bytes.toBytes(c));
}
return p;
}
}, HBaseTypes.puts());
}
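// HFiles must be written in fully sorted KeyValue order, so the generated
// KeyValues are grouped and sorted with HFileUtils.KeyValueComparator
// before being handed to the HFile target.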
private static PCollection<KeyValue> convertToKeyValues(PTable<String, Long> in) {
return in.parallelDo(new MapFn<Pair<String, Long>, Pair<KeyValue, Void>>() {
@Override
public Pair<KeyValue, Void> map(Pair<String, Long> input) {
String w = input.first();
if (w.length() == 0) {
w = "__EMPTY__";
}
long c = input.second();
Cell cell = CellUtil.createCell(Bytes.toBytes(w), Bytes.toBytes(c));
return Pair.of(KeyValueUtil.copyToNewKeyValue(cell), null);
}
}, tableOf(HBaseTypes.keyValues(), nulls()))
.groupByKey(GroupingOptions.builder()
.sortComparatorClass(HFileUtils.KeyValueComparator.class)
.build())
.ungroup()
.keys();
}
private static PCollection<String> split(PCollection<String> in, final String regex) {
return in.parallelDo(new DoFn<String, String>() {
@Override
public void process(String input, Emitter<String> emitter) {
for (String w : input.split(regex)) {
emitter.emit(w);
}
}
}, Writables.strings());
}
/** Reads the first value on a given row from a set of HFiles. */
private static KeyValue readFromHFiles(FileSystem fs, Path mrOutputPath, String row) throws IOException {
List<KeyValueScanner> scanners = Lists.newArrayList();
KeyValue fakeKV = KeyValueUtil.createFirstOnRow(Bytes.toBytes(row));
for (FileStatus e : fs.listStatus(mrOutputPath)) {
Path f = e.getPath();
if (!f.getName().startsWith("part-")) { // filter out "_SUCCESS"
continue;
}
StoreFileReader reader = new StoreFileReader(
fs,
f,
new CacheConfig(fs.getConf()),
true,
new AtomicInteger(),
false,
fs.getConf());
StoreFileScanner scanner = reader.getStoreFileScanner(false, false, false, 0, 0, false);
scanner.seek(fakeKV); // have to call seek on each underlying scanner, otherwise KeyValueHeap won't work
scanners.add(scanner);
}
assertTrue(!scanners.isEmpty());
KeyValueScanner kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR);
boolean seekOk = kvh.seek(fakeKV);
assertTrue(seekOk);
Cell kv = kvh.next();
kvh.close();
return KeyValueUtil.copyToNewKeyValue(kv);
}
private static Path copyResourceFileToHDFS(String resourceName) throws IOException {
Configuration conf = HBASE_TEST_UTILITY.getConfiguration();
FileSystem fs = FileSystem.get(conf);
Path resultPath = getTempPathOnHDFS(resourceName);
InputStream in = null;
OutputStream out = null;
try {
in = Resources.getResource(resourceName).openConnection().getInputStream();
out = fs.create(resultPath);
IOUtils.copy(in, out);
} finally {
IOUtils.closeQuietly(in);
IOUtils.closeQuietly(out);
}
return resultPath;
}
private static Path getTempPathOnHDFS(String fileName) throws IOException {
Configuration conf = HBASE_TEST_UTILITY.getConfiguration();
FileSystem fs = FileSystem.get(conf);
Path result = new Path(TEMP_DIR, fileName);
return result.makeQualified(fs);
}
private static long getWordCountFromTable(Table table, String word) throws IOException {
return getWordCountFromTable(table, TEST_FAMILY, word);
}
private static long getWordCountFromTable(Table table, byte[] columnFamily, String word) throws IOException {
Get get = new Get(Bytes.toBytes(word));
get.addFamily(columnFamily);
byte[] value = table.get(get).value();
if (value == null) {
fail("no such row: " + word);
}
return Bytes.toLong(value);
}
}
| 2,212 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/CreateIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.junit.Rule;
import org.junit.Test;
import java.util.Map;
import static org.junit.Assert.assertEquals;
public class CreateIT {
@Rule
public TemporaryPath tmpDir = new TemporaryPath();
@Test
public void testMRWritable() throws Exception {
run(new MRPipeline(CreateIT.class, tmpDir.getDefaultConfiguration()), WritableTypeFamily.getInstance());
}
@Test
public void testMRAvro() throws Exception {
run(new MRPipeline(CreateIT.class, tmpDir.getDefaultConfiguration()), AvroTypeFamily.getInstance());
}
@Test
public void testMemWritable() throws Exception {
run(MemPipeline.getInstance(), WritableTypeFamily.getInstance());
}
@Test
public void testMemAvro() throws Exception {
run(MemPipeline.getInstance(), AvroTypeFamily.getInstance());
}
@Test
public void testSparkWritable() throws Exception {
run(new SparkPipeline("local", "CreateIT", CreateIT.class, tmpDir.getDefaultConfiguration()),
WritableTypeFamily.getInstance());
}
@Test
public void testSparkAvro() throws Exception {
run(new SparkPipeline("local", "CreateIT", CreateIT.class, tmpDir.getDefaultConfiguration()),
AvroTypeFamily.getInstance());
}
public static void run(Pipeline p, PTypeFamily ptf) {
PTable<String, Long> in = p.create(
ImmutableList.of(
Pair.of("a", 2L), Pair.of("b", 3L), Pair.of("c", 5L),
Pair.of("a", 1L), Pair.of("b", 8L), Pair.of("c", 7L)),
ptf.tableOf(ptf.strings(), ptf.longs()),
CreateOptions.nameAndParallelism("in", 2));
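// Sum the values for each key: "a" -> 2 + 1 = 3, "b" -> 3 + 8 = 11, "c" -> 5 + 7 = 12.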
PTable<String, Long> out = in.groupByKey().combineValues(Aggregators.SUM_LONGS());
Map<String, Long> values = out.materializeToMap();
assertEquals(3, values.size());
assertEquals(3L, values.get("a").longValue());
assertEquals(11L, values.get("b").longValue());
assertEquals(12L, values.get("c").longValue());
p.done();
}
}
| 2,213 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkSecondarySortIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.lib.SecondarySort;
import org.apache.crunch.test.CrunchTestSupport;
import org.junit.Test;
import java.io.Serializable;
import static org.apache.crunch.types.avro.Avros.*;
import static org.apache.crunch.types.avro.Avros.ints;
import static org.apache.crunch.types.avro.Avros.strings;
import static org.junit.Assert.assertEquals;
public class SparkSecondarySortIT extends CrunchTestSupport implements Serializable {
@Test
public void testSecondarySort() throws Exception {
Pipeline p = new SparkPipeline("local", "secondarysort");
String inputFile = tempDir.copyResourceFileName("secondary_sort_input.txt");
PTable<String, Pair<Integer, Integer>> in = p.read(From.textFile(inputFile))
.parallelDo(new MapFn<String, Pair<String, Pair<Integer, Integer>>>() {
@Override
public Pair<String, Pair<Integer, Integer>> map(String input) {
String[] pieces = input.split(",");
return Pair.of(pieces[0],
Pair.of(Integer.valueOf(pieces[1].trim()), Integer.valueOf(pieces[2].trim())));
}
}, tableOf(strings(), pairs(ints(), ints())));
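// SecondarySort groups the rows by their String key and sorts each group's
// (Integer, Integer) values before invoking the MapFn, which is why the
// joined output asserted below is ordered within each key.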
Iterable<String> lines = SecondarySort.sortAndApply(in, new MapFn<Pair<String, Iterable<Pair<Integer, Integer>>>, String>() {
@Override
public String map(Pair<String, Iterable<Pair<Integer, Integer>>> input) {
Joiner j = Joiner.on(',');
return j.join(input.first(), j.join(input.second()));
}
}, strings()).materialize();
assertEquals(ImmutableList.of("one,[-5,10],[1,1],[2,-3]", "three,[0,-1]", "two,[1,7],[2,6],[4,5]"),
ImmutableList.copyOf(lines));
p.done();
}
}
| 2,214 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkPipelineCallableIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableMap;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.test.CrunchTestSupport;
import org.junit.Test;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class SparkPipelineCallableIT extends CrunchTestSupport {
@Test
public void testSparkShakes() throws Exception {
run(new SparkPipeline("local", "PC", SparkPipelineCallableIT.class, tempDir.getDefaultConfiguration()),
tempDir.copyResourceFileName("shakes.txt"), false /* fail */);
}
@Test
public void testFailure() throws Exception {
run(new SparkPipeline("local", "PC", SparkPipelineCallableIT.class, tempDir.getDefaultConfiguration()),
tempDir.copyResourceFileName("shakes.txt"), true /* fail */);
}
public static int INC1 = 0;
public static int INC2 = 0;
public static void run(Pipeline p, final String input, final boolean fail) {
PTable<String, Long> top3 = p.sequentialDo(new PipelineCallable<PCollection<String>>() {
@Override
public Status call() {
INC1 = 17;
return fail ? Status.FAILURE : Status.SUCCESS;
}
@Override
public PCollection<String> getOutput(Pipeline pipeline) {
return pipeline.readTextFile(input);
}
}.named("first"))
.sequentialDo("onInput", new PipelineCallable<PCollection<String>>() {
@Override
protected PCollection<String> getOutput(Pipeline pipeline) {
return getOnlyPCollection();
}
@Override
public Status call() throws Exception {
return Status.SUCCESS;
}
})
.count()
.sequentialDo("label", new PipelineCallable<PTable<String, Long>>() {
@Override
public Status call() {
INC2 = 29;
if (getPCollection("label") != null) {
return Status.SUCCESS;
}
return Status.FAILURE;
}
@Override
public PTable<String, Long> getOutput(Pipeline pipeline) {
return (PTable<String, Long>) getOnlyPCollection();
}
}.named("second"))
.top(3);
if (fail) {
assertFalse(p.run().succeeded());
} else {
Map<String, Long> counts = top3.materializeToMap();
assertEquals(ImmutableMap.of("", 697L, "Enter.", 7L, "Exeunt.", 21L), counts);
assertEquals(17, INC1);
assertEquals(29, INC2);
}
p.done();
}
}
| 2,215 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/test/StringWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.test;
import org.apache.crunch.MapFn;
/**
* Simple String wrapper for testing with Avro reflection.
*/
public class StringWrapper implements Comparable<StringWrapper> {
public static class StringToStringWrapperMapFn extends MapFn<String, StringWrapper> {
@Override
public StringWrapper map(String input) {
return wrap(input);
}
}
public static class StringWrapperToStringMapFn extends MapFn<StringWrapper, String> {
@Override
public String map(StringWrapper input) {
return input.getValue();
}
}
private String value;
public StringWrapper() {
this("");
}
public StringWrapper(String value) {
this.value = value;
}
@Override
public int compareTo(StringWrapper o) {
return this.value.compareTo(o.value);
}
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((value == null) ? 0 : value.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
StringWrapper other = (StringWrapper) obj;
if (value == null) {
if (other.value != null)
return false;
} else if (!value.equals(other.value))
return false;
return true;
}
@Override
public String toString() {
return "StringWrapper [value=" + value + "]";
}
public static StringWrapper wrap(String value) {
return new StringWrapper(value);
}
}
| 2,216 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/test/Tests.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.test;
import static com.google.common.base.Preconditions.checkNotNull;
public class Tests {
/**
* This doesn't check whether the resource exists!
*
* @param testCase
* @param resourceName
* @return The path to the resource (never null)
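*
* For example, for a test instance of org.example.FooIT and a resource named
* "data.txt", this returns "org/example/FooITData/data.txt".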
*/
public static String resource(Object testCase, String resourceName) {
checkNotNull(testCase);
checkNotNull(resourceName);
// Note: We append "Data" because otherwise Eclipse would complain about
// the test case's class name clashing with the resource directory's name.
return testCase.getClass().getName().replaceAll("\\.", "/") + "Data/" + resourceName;
}
}
| 2,217 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/ByteArrayHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.primitives.UnsignedBytes;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryData;
import java.io.Serializable;
import java.util.Arrays;
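/**
 * Strategy for hashing and comparing serialized values as raw byte arrays:
 * the WRITABLES helper compares the bytes lexicographically, while the
 * Avro-backed helper delegates to Avro's BinaryData so ordering and hashing
 * follow the value's schema.
 */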
public abstract class ByteArrayHelper implements Serializable {
public static final ByteArrayHelper WRITABLES = new ByteArrayHelper() {
@Override
boolean equal(byte[] left, byte[] right) {
return Arrays.equals(left, right);
}
@Override
int hashCode(byte[] value) {
return value != null ? Arrays.hashCode(value) : 0;
}
@Override
int compare(byte[] left, byte[] right) {
return UnsignedBytes.lexicographicalComparator().compare(left, right);
}
};
public static ByteArrayHelper forAvroSchema(Schema schema) {
return new AvroByteArrayHelper(schema);
}
abstract boolean equal(byte[] left, byte[] right);
abstract int hashCode(byte[] value);
abstract int compare(byte[] left, byte[] right);
static class AvroByteArrayHelper extends ByteArrayHelper {
private String jsonSchema;
private transient Schema schema;
public AvroByteArrayHelper(Schema schema) {
this.jsonSchema = schema.toString();
}
private Schema getSchema() {
if (schema == null) {
schema = new Schema.Parser().parse(jsonSchema);
}
return schema;
}
@Override
boolean equal(byte[] left, byte[] right) {
return compare(left, right) == 0;
}
@Override
int hashCode(byte[] value) {
return BinaryData.hashCode(value, 0, value.length, getSchema());
}
@Override
int compare(byte[] left, byte[] right) {
return BinaryData.compare(left, 0, right, 0, getSchema());
}
}
}
| 2,218 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/SparkPartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
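/**
 * Partitioner that routes each record to the partition index that was
 * precomputed and stored in its IntByteArray key, rather than hashing the
 * key itself.
 */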
public class SparkPartitioner extends org.apache.spark.Partitioner {
private final int numPartitions;
public SparkPartitioner(int numPartitions) {
this.numPartitions = numPartitions;
}
@Override
public int numPartitions() {
return numPartitions;
}
@Override
public int getPartition(Object key) {
return ((IntByteArray) key).partition;
}
}
| 2,219 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/SparkRuntimeContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.ByteStreams;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.mapred.SparkCounter;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.spark.Accumulator;
import org.apache.spark.SparkFiles;
import org.apache.spark.broadcast.Broadcast;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.List;
import java.util.Map;
public class SparkRuntimeContext implements Serializable {
private String jobName;
private Broadcast<byte[]> broadConf;
private final Accumulator<Map<String, Map<String, Long>>> counters;
private transient Configuration conf;
private transient TaskInputOutputContext context;
private transient Integer lastTID;
public SparkRuntimeContext(
String jobName,
Accumulator<Map<String, Map<String, Long>>> counters,
Broadcast<byte[]> broadConf) {
this.jobName = jobName;
this.counters = counters;
this.broadConf = broadConf;
}
public void setConf(Broadcast<byte[]> broadConf) {
this.broadConf = broadConf;
this.conf = null;
}
public void initialize(DoFn<?, ?> fn, Integer tid) {
if (context == null || !Objects.equal(lastTID, tid)) {
TaskAttemptID attemptID;
if (tid != null) {
TaskID taskId = new TaskID(new JobID(jobName, 0), false, tid);
attemptID = new TaskAttemptID(taskId, 0);
lastTID = tid;
} else {
attemptID = new TaskAttemptID();
lastTID = null;
}
configureLocalFiles();
context = new MapContextImpl(getConfiguration(), attemptID, null, null, null, new SparkReporter(counters), null);
}
fn.setContext(context);
fn.initialize();
}
private void configureLocalFiles() {
try {
URI[] uris = DistributedCache.getCacheFiles(getConfiguration());
if (uris != null) {
List<String> allFiles = Lists.newArrayList();
for (URI uri : uris) {
File f = new File(uri.getPath());
allFiles.add(SparkFiles.get(f.getName()));
}
String sparkFiles = Joiner.on(',').join(allFiles);
// Set both the Hadoop1 and Hadoop2 property names so the local cache files resolve on either version.
getConfiguration().set("mapreduce.job.cache.local.files", sparkFiles);
getConfiguration().set("mapred.cache.localFiles", sparkFiles);
}
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
}
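// The job Configuration is shipped to executors as a broadcast byte array
// and lazily deserialized here on first access.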
public Configuration getConfiguration() {
if (conf == null) {
conf = new Configuration();
try {
conf.readFields(ByteStreams.newDataInput(broadConf.value()));
} catch (Exception e) {
throw new RuntimeException("Error reading broadcast configuration", e);
}
}
return conf;
}
private static class SparkReporter extends StatusReporter implements Serializable {
Accumulator<Map<String, Map<String, Long>>> accum;
private transient Map<String, Map<String, Counter>> counters;
public SparkReporter(Accumulator<Map<String, Map<String, Long>>> accum) {
this.accum = accum;
this.counters = Maps.newHashMap();
}
@Override
public Counter getCounter(Enum<?> anEnum) {
return getCounter(anEnum.getDeclaringClass().toString(), anEnum.name());
}
@Override
public Counter getCounter(String group, String name) {
Map<String, Counter> grp = counters.get(group);
if (grp == null) {
grp = Maps.newTreeMap();
counters.put(group, grp);
}
if (!grp.containsKey(name)) {
grp.put(name, new SparkCounter(group, name, accum));
}
return grp.get(name);
}
@Override
public void progress() {
}
@Override
public float getProgress() {
return 0;
}
@Override
public void setStatus(String s) {
}
}
}
| 2,220 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/GuavaUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.base.Function;
import org.apache.crunch.Pair;
import scala.Tuple2;
import javax.annotation.Nullable;
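/**
 * Guava Function helpers for converting between Scala's Tuple2 and Crunch's
 * Pair in both directions.
 */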
public class GuavaUtils {
public static <K, V> Function<Tuple2<K, V>, Pair<K, V>> tuple2PairFunc() {
return new Function<Tuple2<K, V>, Pair<K, V>>() {
@Override
public Pair<K, V> apply(@Nullable Tuple2<K, V> kv) {
return kv == null ? null : Pair.of(kv._1(), kv._2());
}
};
}
public static <K, V> Function<Pair<K, V>, Tuple2<K, V>> pair2tupleFunc() {
return new Function<Pair<K, V>, Tuple2<K, V>>() {
@Override
public Tuple2<K, V> apply(@Nullable Pair<K, V> kv) {
return kv == null ? null : new Tuple2<K, V>(kv.first(), kv.second());
}
};
}
}
| 2,221 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/CounterAccumulatorParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.collect.Maps;
import org.apache.spark.AccumulatorParam;
import java.util.Map;
public class CounterAccumulatorParam implements AccumulatorParam<Map<String, Map<String, Long>>> {
@Override
public Map<String, Map<String, Long>> addAccumulator(
Map<String, Map<String, Long>> current,
Map<String, Map<String, Long>> added) {
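// Merges the nested counter maps group-by-group and name-by-name; for
// example, merging {"grp": {"ctr": 2}} into {"grp": {"ctr": 3}} yields
// {"grp": {"ctr": 5}}.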
for (Map.Entry<String, Map<String, Long>> e : added.entrySet()) {
Map<String, Long> grp = current.get(e.getKey());
if (grp == null) {
grp = Maps.newTreeMap();
current.put(e.getKey(), grp);
}
for (Map.Entry<String, Long> f : e.getValue().entrySet()) {
Long cnt = grp.get(f.getKey());
cnt = (cnt == null) ? f.getValue() : cnt + f.getValue();
grp.put(f.getKey(), cnt);
}
}
return current;
}
@Override
public Map<String, Map<String, Long>> addInPlace(
Map<String, Map<String, Long>> first,
Map<String, Map<String, Long>> second) {
return addAccumulator(first, second);
}
@Override
public Map<String, Map<String, Long>> zero(Map<String, Map<String, Long>> counts) {
return Maps.newHashMap();
}
}
| 2,222 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/SparkComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import org.apache.avro.mapred.AvroKeyComparator;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableType;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ReflectionUtils;
import java.io.IOException;
import java.io.Serializable;
import java.util.Comparator;
public class SparkComparator implements Comparator<ByteArray>, Serializable {
private final Class<? extends RawComparator> cmpClass;
private final GroupingOptions options;
private final PGroupedTableType ptype;
private final SparkRuntimeContext ctxt;
private transient RawComparator<?> cmp;
public SparkComparator(GroupingOptions options,
PGroupedTableType ptype,
SparkRuntimeContext ctxt) {
if (options.getSortComparatorClass() != null) {
this.cmpClass = options.getSortComparatorClass();
} else if (AvroTypeFamily.getInstance().equals(ptype.getFamily())) {
this.cmpClass = AvroKeyComparator.class;
} else {
this.cmpClass = null;
}
this.options = options;
this.ptype = ptype;
this.ctxt = ctxt;
}
@Override
public int compare(ByteArray s1, ByteArray s2) {
byte[] b1 = s1.value;
byte[] b2 = s2.value;
return rawComparator().compare(b1, 0, b1.length, b2, 0, b2.length);
}
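// Lazily resolves the RawComparator: an explicit sort comparator from the
// GroupingOptions takes precedence; otherwise Avro-typed keys use
// AvroKeyComparator and Writable-typed keys use the WritableComparator
// registered for their serialization class.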
private RawComparator<?> rawComparator() {
if (cmp == null) {
try {
ptype.initialize(ctxt.getConfiguration());
Job job = new Job(ctxt.getConfiguration());
ptype.configureShuffle(job, options);
if (cmpClass != null) {
cmp = ReflectionUtils.newInstance(cmpClass, job.getConfiguration());
} else {
cmp = WritableComparator.get(((WritableType) ptype.getTableType().getKeyType()).getSerializationClass());
if (cmp == null) {
cmp = new ByteWritable.Comparator();
}
}
} catch (IOException e) {
throw new CrunchRuntimeException("Error configuring sort comparator", e);
}
}
return cmp;
}
}
| 2,223 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/SparkCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import org.apache.spark.api.java.JavaRDDLike;
public interface SparkCollection {
JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime);
}
| 2,224 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/SparkPipeline.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.CreateOptions;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.PipelineExecution;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.spark.collect.CreatedCollection;
import org.apache.crunch.impl.spark.collect.CreatedTable;
import org.apache.crunch.impl.spark.collect.EmptyPCollection;
import org.apache.crunch.impl.spark.collect.EmptyPTable;
import org.apache.crunch.impl.spark.collect.SparkCollectFactory;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
public class SparkPipeline extends DistributedPipeline {
private static final Logger LOG = LoggerFactory.getLogger(SparkPipeline.class);
private final String sparkConnect;
private JavaSparkContext sparkContext;
private Class<?> jarClass;
private final Map<PCollection<?>, StorageLevel> cachedCollections = Maps.newHashMap();
public SparkPipeline(String sparkConnect, String appName) {
this(sparkConnect, appName, null);
}
public SparkPipeline(String sparkConnect, String appName, Class<?> jarClass) {
this(sparkConnect, appName, jarClass, new Configuration());
}
public SparkPipeline(String sparkConnect, String appName, Class<?> jarClass, Configuration conf) {
super(appName, conf, new SparkCollectFactory());
this.sparkConnect = Preconditions.checkNotNull(sparkConnect);
this.jarClass = jarClass;
}
public SparkPipeline(JavaSparkContext sparkContext, String appName) {
this(sparkContext, appName, null, sparkContext != null ? sparkContext.hadoopConfiguration() : new Configuration());
}
public SparkPipeline(JavaSparkContext sparkContext, String appName, Class<?> jarClass, Configuration conf) {
super(appName, conf, new SparkCollectFactory());
this.sparkContext = Preconditions.checkNotNull(sparkContext);
this.sparkConnect = sparkContext.getSparkHome().orNull();
this.jarClass = jarClass;
}
@Override
public <T> Iterable<T> materialize(PCollection<T> pcollection) {
ReadableSource<T> readableSrc = getMaterializeSourceTarget(pcollection);
MaterializableIterable<T> c = new MaterializableIterable<T>(this, readableSrc);
if (!outputTargetsToMaterialize.containsKey(pcollection)) {
outputTargetsToMaterialize.put((PCollectionImpl) pcollection, c);
}
return c;
}
@Override
public <S> PCollection<S> emptyPCollection(PType<S> ptype) {
return new EmptyPCollection<S>(this, ptype);
}
@Override
public <K, V> PTable<K, V> emptyPTable(PTableType<K, V> ptype) {
return new EmptyPTable<K, V>(this, ptype);
}
@Override
public <S> PCollection<S> create(Iterable<S> contents, PType<S> ptype, CreateOptions options) {
return new CreatedCollection<S>(this, contents, ptype, options);
}
@Override
public <K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype, CreateOptions options) {
return new CreatedTable<K, V>(this, contents, ptype, options);
}
@Override
public <T> void cache(PCollection<T> pcollection, CachingOptions options) {
cachedCollections.put(pcollection, toStorageLevel(options));
}
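// Translates Crunch CachingOptions into the equivalent Spark StorageLevel
// (use of disk, use of memory, deserialized storage, and replica count).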
private StorageLevel toStorageLevel(CachingOptions options) {
return StorageLevel.apply(
options.useDisk(),
options.useMemory(),
options.deserialized(),
options.replicas());
}
@Override
public PipelineResult run() {
try {
PipelineExecution exec = runAsync();
exec.waitUntilDone();
return exec.getResult();
} catch (Exception e) {
LOG.error("Exception running pipeline", e);
return PipelineResult.EMPTY;
}
}
@Override
public PipelineExecution runAsync() {
Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize = Maps.newHashMap();
for (PCollectionImpl<?> c : outputTargets.keySet()) {
if (outputTargetsToMaterialize.containsKey(c)) {
toMaterialize.put(c, outputTargetsToMaterialize.get(c));
outputTargetsToMaterialize.remove(c);
}
}
Configuration conf = getConfiguration();
if (sparkContext == null) {
SparkConf sparkConf = new SparkConf();
for (Map.Entry<String, String> e : conf) {
if (e.getKey().startsWith("spark.")) {
sparkConf.set(e.getKey(), e.getValue());
}
}
this.sparkContext = new JavaSparkContext(sparkConnect, getName(), sparkConf);
if (jarClass != null) {
String[] jars = JavaSparkContext.jarOfClass(jarClass);
if (jars != null && jars.length > 0) {
for (String jar : jars) {
sparkContext.addJar(jar);
}
}
}
}
copyConfiguration(conf, sparkContext.hadoopConfiguration());
SparkRuntime runtime = new SparkRuntime(this, sparkContext, conf, outputTargets,
toMaterialize, cachedCollections, allPipelineCallables);
runtime.execute();
outputTargets.clear();
return runtime;
}
@Override
public PipelineResult done() {
PipelineResult res = super.done();
if (sparkContext != null) {
sparkContext.stop();
sparkContext = null;
}
return res;
}
private static void copyConfiguration(Configuration from, Configuration to) {
for (Map.Entry<String, String> e : from) {
to.set(e.getKey(), e.getValue());
}
}
}
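// ---------------------------------------------------------------------------
// Usage sketch (not part of the original file): a minimal, hedged example of
// driving a SparkPipeline end to end. The class name, the "local" master, and
// the input/output paths are illustrative assumptions; readTextFile and
// writeTextFile are the convenience methods inherited from the Crunch Pipeline
// interface, while run() and done() are the methods defined above.
// ---------------------------------------------------------------------------
class SparkPipelineUsageSketch {
  public static void main(String[] args) {
    // Spark connect string, application name, and a class whose jar is shipped to the workers.
    SparkPipeline pipeline = new SparkPipeline("local", "spark-pipeline-sketch", SparkPipelineUsageSketch.class);
    // Read a text file, write it back out, and execute; run() blocks until the work completes.
    PCollection<String> lines = pipeline.readTextFile("/tmp/input.txt");
    pipeline.writeTextFile(lines, "/tmp/output");
    PipelineResult result = pipeline.run();
    System.out.println("Succeeded: " + result.succeeded());
    // done() runs any remaining outstanding work and stops the underlying JavaSparkContext.
    pipeline.done();
  }
}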
| 2,225 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/SparkRuntime.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.crunch.CombineFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.PipelineExecution;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.spark.fn.MapFunction;
import org.apache.crunch.impl.spark.fn.OutputConverterFunction;
import org.apache.crunch.impl.spark.fn.PairMapFunction;
import org.apache.crunch.io.CrunchOutputs;
import org.apache.crunch.io.MapReduceTarget;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.Accumulator;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
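/**
 * Executes a {@code SparkPipeline}: a monitor thread walks the output targets in dependency
 * order, converts each PCollection into an RDD via {@code SparkCollection#getJavaRDDLike},
 * writes the results to their configured targets, and reports progress and counters through
 * the {@code PipelineExecution} interface.
 */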
public class SparkRuntime extends AbstractFuture<PipelineResult> implements PipelineExecution {
private static final Logger LOG = LoggerFactory.getLogger(SparkRuntime.class);
private SparkPipeline pipeline;
private JavaSparkContext sparkContext;
private Configuration conf;
private CombineFn combineFn;
private SparkRuntimeContext ctxt;
private Accumulator<Map<String, Map<String, Long>>> counters;
private Map<PCollectionImpl<?>, Set<Target>> outputTargets;
private Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize;
private Map<PCollection<?>, StorageLevel> toCache;
private Map<PipelineCallable<?>, Set<Target>> allPipelineCallables;
private Set<PipelineCallable<?>> activePipelineCallables;
private final CountDownLatch doneSignal = new CountDownLatch(1);
private AtomicReference<Status> status = new AtomicReference<Status>(Status.READY);
private boolean started;
private Thread monitorThread;
// Note that this is the opposite of the MR sort
static final Comparator<PCollectionImpl<?>> DEPTH_COMPARATOR = new Comparator<PCollectionImpl<?>>() {
@Override
public int compare(PCollectionImpl<?> left, PCollectionImpl<?> right) {
int cmp = left.getDepth() - right.getDepth();
if (cmp == 0) {
// Ensure that two distinct output collections at the same depth are not
// treated as equal by the TreeMap (which would silently drop one of them).
// Using the collection name would be nicer here, but names aren't
// necessarily unique.
cmp = Integer.compare(left.hashCode(), right.hashCode());
}
return cmp;
}
};
public SparkRuntime(SparkPipeline pipeline,
JavaSparkContext sparkContext,
Configuration conf,
Map<PCollectionImpl<?>, Set<Target>> outputTargets,
Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize,
Map<PCollection<?>, StorageLevel> toCache,
Map<PipelineCallable<?>, Set<Target>> allPipelineCallables) {
this.pipeline = pipeline;
this.sparkContext = sparkContext;
this.conf = conf;
this.counters = sparkContext.accumulator(Maps.<String, Map<String, Long>>newHashMap(), pipeline.getName(),
new CounterAccumulatorParam());
this.ctxt = new SparkRuntimeContext(sparkContext.appName(), counters,
sparkContext.broadcast(WritableUtils.toByteArray(conf)));
this.outputTargets = Maps.newTreeMap(DEPTH_COMPARATOR);
this.outputTargets.putAll(outputTargets);
this.toMaterialize = toMaterialize;
this.toCache = toCache;
this.allPipelineCallables = allPipelineCallables;
this.activePipelineCallables = allPipelineCallables.keySet();
this.status.set(Status.READY);
this.monitorThread = new Thread(new Runnable() {
@Override
public void run() {
monitorLoop();
}
});
}
public void setCombineFn(CombineFn combineFn) {
this.combineFn = combineFn;
}
public CombineFn getCombineFn() {
CombineFn ret = combineFn;
this.combineFn = null;
return ret;
}
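// Makes DistributedCache entries available to Spark workers via sparkContext.addFile();
// directory entries are first merged into a single "sparkreadable-" file, since addFile
// expects individual files rather than directories.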
private void distributeFiles() {
try {
URI[] uris = DistributedCache.getCacheFiles(conf);
if (uris != null) {
URI[] outURIs = new URI[uris.length];
for (int i = 0; i < uris.length; i++) {
Path path = new Path(uris[i]);
FileSystem fs = path.getFileSystem(conf);
if (fs.isFile(path)) {
outURIs[i] = uris[i];
} else {
Path mergePath = new Path(path.getParent(), "sparkreadable-" + path.getName());
FileUtil.copyMerge(fs, path, fs, mergePath, false, conf, "");
outURIs[i] = mergePath.toUri();
}
sparkContext.addFile(outURIs[i].toString());
}
DistributedCache.setCacheFiles(outURIs, conf);
}
} catch (IOException e) {
throw new RuntimeException("Error retrieving cache files", e);
}
}
public synchronized SparkRuntime execute() {
if (!started) {
monitorThread.start();
started = true;
}
return this;
}
public JavaSparkContext getSparkContext() {
return sparkContext;
}
public SparkRuntimeContext getRuntimeContext() {
return ctxt;
}
public Configuration getConfiguration() {
return conf;
}
public boolean isValid(JavaRDDLike<?, ?> rdd) {
return (rdd != null); //TODO: support multi-contexts
}
public StorageLevel getStorageLevel(PCollection<?> pcollection) {
return toCache.get(pcollection);
}
@Override
public String getPlanDotFile() {
return "";
}
@Override
public Map<String, String> getNamedDotFiles() {
return ImmutableMap.of("", "");
}
@Override
public void waitFor(long timeout, TimeUnit timeUnit) throws InterruptedException {
doneSignal.await(timeout, timeUnit);
}
@Override
public void waitUntilDone() throws InterruptedException {
doneSignal.await();
}
private void runCallables(Set<Target> unfinished) {
Set<PipelineCallable<?>> oldCallables = activePipelineCallables;
activePipelineCallables = Sets.newHashSet();
List<PipelineCallable<?>> callablesToRun = Lists.newArrayList();
List<PipelineCallable<?>> failedCallables = Lists.newArrayList();
for (PipelineCallable<?> pipelineCallable : oldCallables) {
if (Sets.intersection(allPipelineCallables.get(pipelineCallable), unfinished).isEmpty()) {
if (pipelineCallable.runSingleThreaded()) {
try {
if (pipelineCallable.call() != PipelineCallable.Status.SUCCESS) {
failedCallables.add(pipelineCallable);
}
} catch (Throwable t) {
pipelineCallable.setMessage(t.getLocalizedMessage());
failedCallables.add(pipelineCallable);
}
} else {
callablesToRun.add(pipelineCallable);
}
} else {
// Still need to run this one
activePipelineCallables.add(pipelineCallable);
}
}
ListeningExecutorService es = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
try {
List<Future<PipelineCallable.Status>> res = es.invokeAll(callablesToRun);
for (int i = 0; i < res.size(); i++) {
if (res.get(i).get() != PipelineCallable.Status.SUCCESS) {
failedCallables.add((PipelineCallable) callablesToRun.get(i));
}
}
} catch (Throwable t) {
t.printStackTrace();
failedCallables.addAll((List) callablesToRun);
} finally {
es.shutdownNow();
}
if (!failedCallables.isEmpty()) {
LOG.error("{} callable failure(s) occurred:", failedCallables.size());
for (PipelineCallable<?> c : failedCallables) {
LOG.error("{} : {}", c.getName(), c.getMessage());
}
status.set(Status.FAILED);
set(PipelineResult.EMPTY);
doneSignal.countDown();
}
}
private void monitorLoop() {
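// Drives the whole pipeline: collect the target dependencies of every output PCollection,
// then repeatedly (1) convert the PCollections whose dependencies are satisfied into RDDs,
// (2) write each RDD to its path-based or MapReduce-style targets, and (3) release the
// finished targets and run any PipelineCallables that were waiting on them, until no
// outputs remain or a failure has been recorded.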
status.set(Status.RUNNING);
long start = System.currentTimeMillis();
Map<PCollectionImpl<?>, Set<Target>> targetDeps = Maps.newTreeMap(DEPTH_COMPARATOR);
Set<Target> unfinished = Sets.newHashSet();
for (PCollectionImpl<?> pcollect : outputTargets.keySet()) {
targetDeps.put(pcollect, pcollect.getTargetDependencies());
unfinished.addAll(outputTargets.get(pcollect));
}
runCallables(unfinished);
while (!targetDeps.isEmpty() && doneSignal.getCount() > 0) {
Set<Target> allTargets = Sets.newHashSet();
for (PCollectionImpl<?> pcollect : targetDeps.keySet()) {
allTargets.addAll(outputTargets.get(pcollect));
}
Map<PCollectionImpl<?>, JavaRDDLike<?, ?>> pcolToRdd = Maps.newTreeMap(DEPTH_COMPARATOR);
for (PCollectionImpl<?> pcollect : targetDeps.keySet()) {
if (Sets.intersection(allTargets, targetDeps.get(pcollect)).isEmpty()) {
JavaRDDLike<?, ?> rdd = ((SparkCollection) pcollect).getJavaRDDLike(this);
pcolToRdd.put(pcollect, rdd);
}
}
distributeFiles();
for (Map.Entry<PCollectionImpl<?>, JavaRDDLike<?, ?>> e : pcolToRdd.entrySet()) {
JavaRDDLike<?, ?> rdd = e.getValue();
PType<?> ptype = e.getKey().getPType();
Set<Target> targets = outputTargets.get(e.getKey());
if (targets.size() > 1) {
rdd.rdd().cache();
}
for (Target t : targets) {
Configuration conf = new Configuration(getConfiguration());
getRuntimeContext().setConf(sparkContext.broadcast(WritableUtils.toByteArray(conf)));
if (t instanceof MapReduceTarget) { //TODO: check this earlier
Converter c = t.getConverter(ptype);
IdentityFn ident = IdentityFn.getInstance();
JavaPairRDD<?, ?> outRDD;
if (rdd instanceof JavaRDD) {
outRDD = ((JavaRDD) rdd)
.map(new MapFunction(c.applyPTypeTransforms() ? ptype.getOutputMapFn() : ident, ctxt))
.mapToPair(new OutputConverterFunction(c));
} else {
outRDD = ((JavaPairRDD) rdd)
.map(new PairMapFunction(c.applyPTypeTransforms() ? ptype.getOutputMapFn() : ident, ctxt))
.mapToPair(new OutputConverterFunction(c));
}
try {
Job job = new Job(conf);
if (t instanceof PathTarget) {
PathTarget pt = (PathTarget) t;
pt.configureForMapReduce(job, ptype, pt.getPath(), "out0");
CrunchOutputs.OutputConfig outConfig =
CrunchOutputs.getNamedOutputs(job.getConfiguration()).get("out0");
job.setOutputFormatClass(outConfig.bundle.getFormatClass());
job.setOutputKeyClass(outConfig.keyClass);
job.setOutputValueClass(outConfig.valueClass);
outConfig.bundle.configure(job.getConfiguration());
job.getConfiguration().set("crunch.namedoutput", "out0");
Path tmpPath = pipeline.createTempPath();
outRDD.saveAsNewAPIHadoopFile(
tmpPath.toString(),
c.getKeyClass(),
c.getValueClass(),
job.getOutputFormatClass(),
job.getConfiguration());
pt.handleOutputs(job.getConfiguration(), tmpPath, -1);
} else { //if (t instanceof MapReduceTarget) {
MapReduceTarget mrt = (MapReduceTarget) t;
mrt.configureForMapReduce(job, ptype, pipeline.createTempPath(), "out0");
CrunchOutputs.OutputConfig outConfig =
CrunchOutputs.getNamedOutputs(job.getConfiguration()).get("out0");
job.setOutputFormatClass(outConfig.bundle.getFormatClass());
job.setOutputKeyClass(outConfig.keyClass);
job.setOutputValueClass(outConfig.valueClass);
outRDD.saveAsHadoopDataset(new JobConf(job.getConfiguration()));
}
} catch (Exception et) {
LOG.error("Spark Exception", et);
status.set(Status.FAILED);
set(PipelineResult.EMPTY);
doneSignal.countDown();
}
}
}
unfinished.removeAll(targets);
}
if (status.get() == Status.RUNNING) {
for (PCollectionImpl<?> output : pcolToRdd.keySet()) {
if (toMaterialize.containsKey(output)) {
MaterializableIterable mi = toMaterialize.get(output);
if (mi.isSourceTarget()) {
output.materializeAt((SourceTarget) mi.getSource());
}
}
targetDeps.remove(output);
}
}
runCallables(unfinished);
}
if (status.get() != Status.FAILED && status.get() != Status.KILLED) {
status.set(Status.SUCCEEDED);
set(new PipelineResult(
ImmutableList.of(new PipelineResult.StageResult("Spark", getCounters(),
start, System.currentTimeMillis())),
Status.SUCCEEDED));
} else {
set(PipelineResult.EMPTY);
}
doneSignal.countDown();
}
private Counters getCounters() {
Counters c = new Counters();
Map<String, Map<String, Long>> values = counters.value();
for (Map.Entry<String, Map<String, Long>> e : values.entrySet()) {
CounterGroup cg = c.getGroup(e.getKey());
for (Map.Entry<String, Long> f : e.getValue().entrySet()) {
cg.findCounter(f.getKey()).setValue(f.getValue());
}
}
return c;
}
@Override
public PipelineResult get() throws InterruptedException, ExecutionException {
if (getStatus() == Status.READY) {
execute();
}
return super.get();
}
@Override
public PipelineResult get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException,
ExecutionException {
if (getStatus() == Status.READY) {
execute();
}
return super.get(timeout, unit);
}
@Override
public Status getStatus() {
return status.get();
}
@Override
public PipelineResult getResult() {
try {
return get();
} catch (Exception e) {
LOG.error("Exception retrieving PipelineResult, returning EMPTY", e);
return PipelineResult.EMPTY;
}
}
@Override
public void kill() throws InterruptedException {
if (started) {
sparkContext.stop();
set(PipelineResult.EMPTY);
}
}
}
| 2,226 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/ByteArray.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import java.io.Serializable;
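/**
 * Wraps the serialized bytes of a record together with a {@code ByteArrayHelper} that supplies
 * the equality, hashing, and ordering semantics used when these values act as Spark shuffle keys.
 */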
public class ByteArray implements Serializable, Comparable<ByteArray> {
public final byte[] value;
protected final ByteArrayHelper helper;
public ByteArray(byte[] value, ByteArrayHelper helper) {
this.value = value;
this.helper = helper;
}
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
ByteArray byteArray = (ByteArray) o;
return helper.equal(value, byteArray.value);
}
@Override
public int hashCode() {
return helper.hashCode(value);
}
@Override
public int compareTo(ByteArray other) {
return helper.compare(value, other.value);
}
}
| 2,227 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/IntByteArray.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
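/**
 * A {@code ByteArray} that also carries a target partition number, so that a custom Crunch
 * partitioner can be honored during a Spark shuffle; {@code ToByteArrayFunction} re-exposes
 * the key as a plain {@code ByteArray} once grouping is complete.
 */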
public class IntByteArray extends ByteArray {
public final int partition;
public IntByteArray(int partition, ByteArray delegate) {
super(delegate.value, delegate.helper);
this.partition = partition;
}
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
IntByteArray that = (IntByteArray) o;
if (partition != that.partition) return false;
return true;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + partition;
}
}
| 2,228 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/CreatedCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.crunch.CreateOptions;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.MapFn;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.crunch.impl.spark.serde.SerDe;
import org.apache.crunch.impl.spark.serde.SerDeFactory;
import org.apache.crunch.types.PType;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.storage.StorageLevel;
import java.io.IOException;
import java.util.List;
/**
* Represents a Spark-based PCollection that was created from a Java {@code Iterable} of
* values.
*/
public class CreatedCollection<T> extends PCollectionImpl<T> implements SparkCollection {
private final Iterable<T> contents;
private final PType<T> ptype;
private final int parallelism;
private JavaRDD<T> rdd;
public CreatedCollection(SparkPipeline p, Iterable<T> contents, PType<T> ptype, CreateOptions options) {
super(options.getName(), p);
this.contents = contents;
this.ptype = ptype;
this.parallelism = options.getParallelism();
}
@Override
protected void acceptInternal(Visitor visitor) {
// No-op
}
@Override
public List<PCollectionImpl<?>> getParents() {
return ImmutableList.of();
}
@Override
protected ReadableData<T> getReadableDataInternal() {
try {
return ptype.createSourceTarget(getPipeline().getConfiguration(),
getPipeline().createTempPath(), contents, parallelism).asReadable();
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
}
@Override
protected long getSizeInternal() {
return Iterables.size(contents);
}
@Override
public long getLastModifiedAt() {
return -1;
}
@Override
public PType<T> getPType() {
return ptype;
}
@Override
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime);
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private JavaRDD<T> getJavaRDDLikeInternal(SparkRuntime runtime) {
SerDe serde = SerDeFactory.create(ptype, runtime.getConfiguration());
ptype.initialize(runtime.getConfiguration());
List<ByteArray> res = Lists.newLinkedList();
try {
for (T value : contents) {
res.add(serde.toBytes(ptype.getOutputMapFn().map(value)));
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
return runtime.getSparkContext()
.parallelize(res, parallelism)
.map(new MapInputFn<T>(serde, ptype.getInputMapFn(), runtime.getRuntimeContext()));
}
static class MapInputFn<T> implements Function<ByteArray, T> {
private final SerDe serde;
private final MapFn<Object, T> fn;
private final SparkRuntimeContext context;
private boolean initialized;
public MapInputFn(SerDe serde, MapFn<Object, T> fn, SparkRuntimeContext context) {
this.serde = serde;
this.fn = fn;
this.context = context;
this.initialized = false;
}
@Override
public T call(ByteArray byteArray) throws Exception {
if (!initialized) {
context.initialize(fn, -1);
initialized = true;
}
return fn.map(serde.fromBytes(byteArray.value));
}
}
}
| 2,229 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/DoTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.impl.dist.collect.BaseDoTable;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.CrunchPairTuple2;
import org.apache.crunch.impl.spark.fn.FlatMapIndexFn;
import org.apache.crunch.types.PTableType;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.storage.StorageLevel;
public class DoTable<K, V> extends BaseDoTable<K, V> implements SparkCollection {
private JavaRDDLike<?, ?> rdd;
<S> DoTable(String name, PCollectionImpl<S> parent, DoFn<S, Pair<K, V>> fn, PTableType<K, V> ntype,
ParallelDoOptions options) {
super(name, parent, fn, ntype, options);
}
<S> DoTable(
String name,
PCollectionImpl<S> parent,
CombineFn<K, V> combineFn,
DoFn<S, Pair<K, V>> fn,
PTableType<K, V> ntype) {
super(name, parent, combineFn, fn, ntype, ParallelDoOptions.builder().build());
}
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime);
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private JavaRDDLike<?, ?> getJavaRDDLikeInternal(SparkRuntime runtime) {
if (combineFn instanceof CombineFn && getOnlyParent() instanceof PGroupedTableImpl) {
runtime.setCombineFn((CombineFn) combineFn);
}
JavaRDDLike<?, ?> parentRDD = ((SparkCollection) getOnlyParent()).getJavaRDDLike(runtime);
fn.configure(runtime.getConfiguration());
return parentRDD
.mapPartitionsWithIndex(
new FlatMapIndexFn(fn, parentRDD instanceof JavaPairRDD, runtime.getRuntimeContext()),
false)
.mapPartitionsToPair(new CrunchPairTuple2());
}
}
| 2,230 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/EmptyPTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.types.PTableType;
import org.apache.spark.api.java.JavaRDDLike;
import scala.Tuple2;
public class EmptyPTable<K, V> extends org.apache.crunch.impl.dist.collect.EmptyPTable<K, V> implements SparkCollection {
public EmptyPTable(DistributedPipeline pipeline, PTableType<K, V> ptype) {
super(pipeline, ptype);
}
@Override
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
return runtime.getSparkContext().parallelizePairs(ImmutableList.<Tuple2<K, V>>of());
}
}
| 2,231 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/CreatedTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.crunch.CreateOptions;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.crunch.impl.spark.serde.SerDe;
import org.apache.crunch.impl.spark.serde.SerDeFactory;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.storage.StorageLevel;
import scala.Tuple2;
import java.io.IOException;
import java.util.List;
/**
* Represents a Spark-based PTable that was created from a Java {@code Iterable} of
* key-value pairs.
*/
public class CreatedTable<K, V> extends PTableBase<K, V> implements SparkCollection {
private final Iterable<Pair<K, V>> contents;
private final PTableType<K, V> ptype;
private final int parallelism;
private JavaPairRDD<K, V> rdd;
public CreatedTable(
SparkPipeline pipeline,
Iterable<Pair<K, V>> contents,
PTableType<K, V> ptype,
CreateOptions options) {
super(options.getName(), pipeline);
this.contents = contents;
this.ptype = ptype;
this.parallelism = options.getParallelism();
}
@Override
protected void acceptInternal(Visitor visitor) {
// No-op
}
@Override
public List<PCollectionImpl<?>> getParents() {
return ImmutableList.of();
}
@Override
protected ReadableData<Pair<K, V>> getReadableDataInternal() {
try {
return ptype.createSourceTarget(pipeline.getConfiguration(),
pipeline.createTempPath(), contents, parallelism).asReadable();
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
}
@Override
protected long getSizeInternal() {
return Iterables.size(contents);
}
@Override
public long getLastModifiedAt() {
return -1;
}
@Override
public PTableType<K, V> getPTableType() {
return ptype;
}
@Override
public PType<Pair<K, V>> getPType() {
return ptype;
}
@Override
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime);
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private JavaPairRDD<K, V> getJavaRDDLikeInternal(SparkRuntime runtime) {
ptype.initialize(runtime.getConfiguration());
PType keyType = ptype.getKeyType();
PType valueType = ptype.getValueType();
SerDe keySerde = SerDeFactory.create(keyType, runtime.getConfiguration());
SerDe valueSerde = SerDeFactory.create(valueType, runtime.getConfiguration());
List<Tuple2<ByteArray, ByteArray>> res = Lists.newLinkedList();
try {
for (Pair<K, V> p : contents) {
ByteArray key = keySerde.toBytes(keyType.getOutputMapFn().map(p.first()));
ByteArray value = valueSerde.toBytes(valueType.getOutputMapFn().map(p.second()));
res.add(new Tuple2<ByteArray, ByteArray>(key, value));
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
return runtime.getSparkContext()
.parallelizePairs(res, parallelism)
.mapToPair(new MapPairInputFn<K, V>(
keySerde, valueSerde, keyType.getInputMapFn(), valueType.getInputMapFn(), runtime.getRuntimeContext()));
}
static class MapPairInputFn<K, V> implements PairFunction<Tuple2<ByteArray, ByteArray>, K, V> {
private final SerDe keySerde;
private final SerDe valueSerde;
private final MapFn<Object, K> keyFn;
private final MapFn<Object, V> valueFn;
private final SparkRuntimeContext context;
private boolean initialized;
public MapPairInputFn(
SerDe keySerde,
SerDe valueSerde,
MapFn<Object, K> keyFn,
MapFn<Object, V> valueFn,
SparkRuntimeContext context) {
this.keySerde = keySerde;
this.valueSerde = valueSerde;
this.keyFn = keyFn;
this.valueFn = valueFn;
this.context = context;
this.initialized = false;
}
@Override
public Tuple2<K, V> call(Tuple2<ByteArray, ByteArray> in) throws Exception {
if (!initialized) {
context.initialize(keyFn, -1);
context.initialize(valueFn, -1);
initialized = true;
}
return new Tuple2<K, V>(
keyFn.map(keySerde.fromBytes(in._1().value)),
valueFn.map(valueSerde.fromBytes(in._2().value)));
}
}
}
| 2,232 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/SparkCollectFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Source;
import org.apache.crunch.TableSource;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.BaseDoCollection;
import org.apache.crunch.impl.dist.collect.BaseDoTable;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.dist.collect.BaseInputTable;
import org.apache.crunch.impl.dist.collect.BaseUnionCollection;
import org.apache.crunch.impl.dist.collect.PCollectionFactory;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import java.util.List;
public class SparkCollectFactory implements PCollectionFactory {
@Override
public <S> BaseInputCollection<S> createInputCollection(
Source<S> source,
String named,
DistributedPipeline pipeline,
ParallelDoOptions doOpts) {
return new InputCollection<S>(source, named, pipeline, doOpts);
}
@Override
public <K, V> BaseInputTable<K, V> createInputTable(
TableSource<K, V> source,
String named,
DistributedPipeline pipeline,
ParallelDoOptions doOpts) {
return new InputTable<K, V>(source, named, pipeline, doOpts);
}
@Override
public <S> BaseUnionCollection<S> createUnionCollection(List<? extends PCollectionImpl<S>> internal) {
return new UnionCollection<S>(internal);
}
@Override
public <S, T> BaseDoCollection<T> createDoCollection(
String name,
PCollectionImpl<S> parent,
DoFn<S, T> fn,
PType<T> type,
ParallelDoOptions options) {
return new DoCollection<T>(name, parent, fn, type, options);
}
@Override
public <S, K, V> BaseDoTable<K, V> createDoTable(
String name,
PCollectionImpl<S> parent,
DoFn<S, Pair<K, V>> fn,
PTableType<K, V> type,
ParallelDoOptions options) {
return new DoTable<K, V>(name, parent, fn, type, options);
}
@Override
public <S, K, V> BaseDoTable<K, V> createDoTable(
String name,
PCollectionImpl<S> parent,
CombineFn<K, V> combineFn,
DoFn<S, Pair<K, V>> fn,
PTableType<K, V> type) {
return new DoTable<K, V>(name, parent, combineFn, fn, type);
}
@Override
public <K, V> BaseGroupedTable<K, V> createGroupedTable(PTableBase<K, V> parent, GroupingOptions groupingOptions) {
return new PGroupedTableImpl<K, V>(parent, groupingOptions);
}
@Override
public <K, V> PTable<K, V> createUnionTable(List<PTableBase<K, V>> internal) {
return new UnionTable<K, V>(internal);
}
}
| 2,233 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/UnionCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.collect.BaseUnionCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.FlatMapPairDoFn;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.storage.StorageLevel;
import java.util.List;
public class UnionCollection<S> extends BaseUnionCollection<S> implements SparkCollection {
private JavaRDDLike<?, ?> rdd;
UnionCollection(List<? extends PCollectionImpl<S>> collections) {
super(collections);
}
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime);
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private JavaRDDLike<?, ?> getJavaRDDLikeInternal(SparkRuntime runtime) {
List<PCollectionImpl<?>> parents = getParents();
JavaRDD[] rdds = new JavaRDD[parents.size()];
for (int i = 0; i < rdds.length; i++) {
if (parents.get(i) instanceof PTableBase) {
JavaPairRDD prdd = (JavaPairRDD) ((SparkCollection) parents.get(i)).getJavaRDDLike(runtime);
rdds[i] = prdd.mapPartitions(new FlatMapPairDoFn(IdentityFn.getInstance(), runtime.getRuntimeContext()));
} else {
rdds[i] = (JavaRDD) ((SparkCollection) parents.get(i)).getJavaRDDLike(runtime);
}
}
return runtime.getSparkContext().union(rdds);
}
}
| 2,234 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/InputCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.MapFn;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Source;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.mr.run.CrunchInputFormat;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.InputConverterFunction;
import org.apache.crunch.impl.spark.fn.MapFunction;
import org.apache.crunch.types.Converter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDDLike;
import java.io.IOException;
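/**
 * Spark-backed input collection: the underlying {@code Source} is configured onto a Hadoop
 * {@code Job} and read through {@code CrunchInputFormat} as a new-API Hadoop RDD, whose
 * records are then converted into the collection's PType domain.
 */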
public class InputCollection<S> extends BaseInputCollection<S> implements SparkCollection {
InputCollection(Source<S> source, String named, DistributedPipeline pipeline, ParallelDoOptions doOpts) {
super(source, named, pipeline, doOpts);
}
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
try {
Job job = new Job(runtime.getConfiguration());
FileInputFormat.addInputPaths(job, "/tmp"); //placeholder
source.configureSource(job, 0);
Converter converter = source.getConverter();
JavaPairRDD<?, ?> input = runtime.getSparkContext().newAPIHadoopRDD(
job.getConfiguration(),
CrunchInputFormat.class,
converter.getKeyClass(),
converter.getValueClass());
input.rdd().setName(getName());
MapFn mapFn = converter.applyPTypeTransforms() ? source.getType().getInputMapFn() : IdentityFn.getInstance();
return input
.map(new InputConverterFunction(source.getConverter()))
.map(new MapFunction(mapFn, runtime.getRuntimeContext()));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 2,235 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/PGroupedTableImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.CombineFn;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkComparator;
import org.apache.crunch.impl.spark.SparkPartitioner;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.CombineMapsideFunction;
import org.apache.crunch.impl.spark.fn.MapOutputFunction;
import org.apache.crunch.impl.spark.fn.PairMapFunction;
import org.apache.crunch.impl.spark.fn.PairMapIterableFunction;
import org.apache.crunch.impl.spark.fn.PartitionedMapOutputFunction;
import org.apache.crunch.impl.spark.fn.ReduceGroupingFunction;
import org.apache.crunch.impl.spark.fn.ReduceInputFunction;
import org.apache.crunch.impl.spark.serde.AvroSerDe;
import org.apache.crunch.impl.spark.serde.SerDe;
import org.apache.crunch.impl.spark.serde.WritableSerDe;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.writable.WritableType;
import org.apache.crunch.util.PartitionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.storage.StorageLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
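/**
 * Spark implementation of a grouped Crunch table: keys and values are serialized with the
 * appropriate Avro or Writable SerDe, an optional map-side combine is applied, and the data
 * is partitioned, grouped, and (when requested) sorted and secondary-grouped before being
 * mapped back into key/Iterable-of-values pairs.
 */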
public class PGroupedTableImpl<K, V> extends BaseGroupedTable<K, V> implements SparkCollection {
private static final Logger LOG = LoggerFactory.getLogger(PGroupedTableImpl.class);
private JavaRDDLike<?, ?> rdd;
PGroupedTableImpl(PTableBase<K, V> parent, GroupingOptions groupingOptions) {
super(parent, groupingOptions);
}
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime, runtime.getCombineFn());
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private AvroSerDe getAvroSerde(PType ptype, Configuration conf) {
AvroType at = (AvroType) ptype;
Map<String, String> props = AvroMode.fromType(at).withFactoryFromConfiguration(conf).getModeProperties();
return new AvroSerDe(at, props);
}
private JavaRDDLike<?, ?> getJavaRDDLikeInternal(SparkRuntime runtime, CombineFn<K, V> combineFn) {
JavaPairRDD<K, V> parentRDD = (JavaPairRDD<K, V>) ((SparkCollection)getOnlyParent()).getJavaRDDLike(runtime);
if (combineFn != null) {
parentRDD = parentRDD.mapPartitionsToPair(
new CombineMapsideFunction<K, V>(combineFn, runtime.getRuntimeContext()));
}
SerDe keySerde, valueSerde;
PTableType<K, V> parentType = ptype.getTableType();
if (parentType instanceof AvroType) {
keySerde = getAvroSerde(parentType.getKeyType(), runtime.getConfiguration());
valueSerde = getAvroSerde(parentType.getValueType(), runtime.getConfiguration());
} else {
keySerde = new WritableSerDe(((WritableType) parentType.getKeyType()).getSerializationClass());
valueSerde = new WritableSerDe(((WritableType) parentType.getValueType()).getSerializationClass());
}
int numPartitions = (groupingOptions.getNumReducers() > 0) ? groupingOptions.getNumReducers() :
PartitionUtils.getRecommendedPartitions(this, getPipeline().getConfiguration());
if (numPartitions <= 0) {
LOG.warn("Attempted to set a non-positive number of partitions");
numPartitions = 1;
}
JavaPairRDD<ByteArray, List<byte[]>> groupedRDD;
if (groupingOptions.getPartitionerClass() != null) {
groupedRDD = parentRDD
.map(new PairMapFunction(ptype.getOutputMapFn(), runtime.getRuntimeContext()))
.mapToPair(
new PartitionedMapOutputFunction(keySerde, valueSerde, ptype, numPartitions, groupingOptions,
runtime.getRuntimeContext()))
.groupByKey(new SparkPartitioner(numPartitions));
} else {
groupedRDD = parentRDD
.map(new PairMapFunction(ptype.getOutputMapFn(), runtime.getRuntimeContext()))
.mapToPair(new MapOutputFunction(keySerde, valueSerde))
.groupByKey(numPartitions);
}
if (groupingOptions.requireSortedKeys() || groupingOptions.getSortComparatorClass() != null) {
SparkComparator scmp = new SparkComparator(groupingOptions, ptype, runtime.getRuntimeContext());
groupedRDD = groupedRDD.sortByKey(scmp);
}
if (groupingOptions.getGroupingComparatorClass() != null) {
groupedRDD = groupedRDD.mapPartitionsToPair(
new ReduceGroupingFunction(groupingOptions, ptype, runtime.getRuntimeContext()));
}
return groupedRDD
.map(new ReduceInputFunction(keySerde, valueSerde))
.mapToPair(new PairMapIterableFunction(ptype.getInputMapFn(), runtime.getRuntimeContext()));
}
}
| 2,236 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/DoCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.DoFn;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.impl.dist.collect.BaseDoCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.FlatMapIndexFn;
import org.apache.crunch.types.PType;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.storage.StorageLevel;
public class DoCollection<S> extends BaseDoCollection<S> implements SparkCollection {
private JavaRDDLike<?, ?> rdd;
<T> DoCollection(String name, PCollectionImpl<T> parent, DoFn<T, S> fn, PType<S> ntype,
ParallelDoOptions options) {
super(name, parent, fn, ntype, options);
}
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime);
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private JavaRDDLike<?, ?> getJavaRDDLikeInternal(SparkRuntime runtime) {
JavaRDDLike<?, ?> parentRDD = ((SparkCollection) getOnlyParent()).getJavaRDDLike(runtime);
fn.configure(runtime.getConfiguration());
return parentRDD.mapPartitionsWithIndex(
new FlatMapIndexFn(fn, parentRDD instanceof JavaPairRDD, runtime.getRuntimeContext()),
false);
}
}
| 2,237 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/ToByteArrayFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.IntByteArray;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.List;
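/**
 * Re-exposes the {@code IntByteArray} key produced by a partitioned groupByKey as a plain
 * {@code ByteArray}, dropping the partition number from the key type for downstream processing.
 */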
public class ToByteArrayFunction implements PairFunction<Tuple2<IntByteArray, List<byte[]>>, ByteArray, List<byte[]>> {
@Override
public Tuple2<ByteArray, List<byte[]>> call(Tuple2<IntByteArray, List<byte[]>> t) throws Exception {
return new Tuple2<ByteArray, List<byte[]>>(t._1(), t._2());
}
}
| 2,238 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/InputTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.MapFn;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.TableSource;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.BaseInputTable;
import org.apache.crunch.impl.mr.run.CrunchInputFormat;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.InputConverterFunction;
import org.apache.crunch.impl.spark.fn.Tuple2MapFunction;
import org.apache.crunch.types.Converter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDDLike;
import java.io.IOException;
public class InputTable<K, V> extends BaseInputTable<K, V> implements SparkCollection {
public InputTable(TableSource<K, V> source, String named, DistributedPipeline pipeline, ParallelDoOptions doOpts) {
super(source, named, pipeline, doOpts);
}
@Override
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
try {
Job job = new Job(runtime.getConfiguration());
source.configureSource(job, 0); // TODO: a custom input format for crunch-spark
Converter converter = source.getConverter();
JavaPairRDD<?, ?> input = runtime.getSparkContext().newAPIHadoopRDD(
job.getConfiguration(),
CrunchInputFormat.class,
converter.getKeyClass(),
converter.getValueClass());
input.rdd().setName(getName());
MapFn mapFn = converter.applyPTypeTransforms() ? source.getType().getInputMapFn() : IdentityFn.getInstance();
return input
.map(new InputConverterFunction(source.getConverter()))
.mapToPair(new Tuple2MapFunction(mapFn, runtime.getRuntimeContext()));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 2,239 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/UnionTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.dist.collect.BaseUnionTable;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.impl.spark.fn.PairFlatMapDoFn;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaRDDLike;
import org.apache.spark.storage.StorageLevel;
import java.util.List;
public class UnionTable<K, V> extends BaseUnionTable<K, V> implements SparkCollection {
private JavaRDDLike<?, ?> rdd;
UnionTable(List<PTableBase<K, V>> tables) {
super(tables);
}
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
if (!runtime.isValid(rdd)) {
rdd = getJavaRDDLikeInternal(runtime);
rdd.rdd().setName(getName());
StorageLevel sl = runtime.getStorageLevel(this);
if (sl != null) {
rdd.rdd().persist(sl);
}
}
return rdd;
}
private JavaRDDLike<?, ?> getJavaRDDLikeInternal(SparkRuntime runtime) {
List<PCollectionImpl<?>> parents = getParents();
JavaPairRDD[] rdds = new JavaPairRDD[parents.size()];
for (int i = 0; i < rdds.length; i++) {
if (parents.get(i) instanceof PTableBase) {
rdds[i] = (JavaPairRDD) ((SparkCollection) parents.get(i)).getJavaRDDLike(runtime);
} else {
JavaRDD rdd = (JavaRDD) ((SparkCollection) parents.get(i)).getJavaRDDLike(runtime);
rdds[i] = rdd.mapPartitionsToPair(new PairFlatMapDoFn(IdentityFn.getInstance(), runtime.getRuntimeContext()));
}
}
return runtime.getSparkContext().union(rdds);
}
}
| 2,240 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/collect/EmptyPCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.collect;
import com.google.common.collect.ImmutableList;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.spark.SparkCollection;
import org.apache.crunch.impl.spark.SparkRuntime;
import org.apache.crunch.types.PType;
import org.apache.spark.api.java.JavaRDDLike;
public class EmptyPCollection<T> extends org.apache.crunch.impl.dist.collect.EmptyPCollection<T>
implements SparkCollection {
public EmptyPCollection(DistributedPipeline pipeline, PType<T> ptype) {
super(pipeline, ptype);
}
@Override
public JavaRDDLike<?, ?> getJavaRDDLike(SparkRuntime runtime) {
return runtime.getSparkContext().parallelize(ImmutableList.of());
}
}
| 2,241 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/PairFlatMapDoFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Iterables;
import org.apache.crunch.DoFn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.GuavaUtils;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import scala.Tuple2;
import java.util.Iterator;
public class PairFlatMapDoFn<T, K, V> implements PairFlatMapFunction<Iterator<T>, K, V> {
private final DoFn<T, Pair<K, V>> fn;
private final SparkRuntimeContext ctxt;
public PairFlatMapDoFn(DoFn<T, Pair<K, V>> fn, SparkRuntimeContext ctxt) {
this.fn = fn;
this.ctxt = ctxt;
}
@Override
public Iterator<Tuple2<K, V>> call(Iterator<T> input) throws Exception {
ctxt.initialize(fn, null);
return Iterables.transform(
new CrunchIterable<T, Pair<K, V>>(fn, input),
GuavaUtils.<K, V>pair2tupleFunc()).iterator();
}
}
| 2,242 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/CrunchIterable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.DoFn;
import org.apache.crunch.util.DoFnIterator;
import java.util.Iterator;
public class CrunchIterable<S, T> implements Iterable<T> {
private final DoFn<S, T> fn;
private final Iterator<S> input;
public CrunchIterable(DoFn<S, T> fn, Iterator<S> input) {
this.fn = fn;
this.input = input;
}
@Override
public Iterator<T> iterator() {
return new DoFnIterator<S, T>(input, fn);
}
}
| 2,243 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/PairMapFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.Function;
import scala.Tuple2;
public class PairMapFunction<K, V, S> implements Function<Tuple2<K, V>, S> {
private final MapFn<Pair<K, V>, S> fn;
private final SparkRuntimeContext ctxt;
private boolean initialized;
public PairMapFunction(MapFn<Pair<K, V>, S> fn, SparkRuntimeContext ctxt) {
this.fn = fn;
this.ctxt = ctxt;
}
@Override
public S call(Tuple2<K, V> kv) throws Exception {
if (!initialized) {
ctxt.initialize(fn, null);
initialized = true;
}
return fn.map(Pair.of(kv._1(), kv._2()));
}
}
| 2,244 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/ReduceInputFunction.java
|
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Iterables;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.serde.SerDe;
import org.apache.spark.api.java.function.Function;
import scala.Tuple2;
import java.util.List;
public class ReduceInputFunction<K, V> implements Function<Tuple2<ByteArray, Iterable<byte[]>>, Pair<K, Iterable<V>>> {
private final SerDe<K> keySerDe;
private final SerDe<V> valueSerDe;
public ReduceInputFunction(SerDe<K> keySerDe, SerDe<V> valueSerDe) {
this.keySerDe = keySerDe;
this.valueSerDe = valueSerDe;
}
@Override
public Pair<K, Iterable<V>> call(Tuple2<ByteArray, Iterable<byte[]>> kv) throws Exception {
return Pair.of(keySerDe.fromBytes(kv._1().value), Iterables.transform(kv._2(), valueSerDe.fromBytesFunction()));
}
}
| 2,245 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/ReduceGroupingFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Lists;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import scala.Tuple2;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
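// Applies secondary grouping on the reduce side: consecutive tuples whose keys compare as
// equal under the job's grouping comparator are merged into a single key with the
// concatenation of their value lists. The incoming iterator is assumed to be sorted by key.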
public class ReduceGroupingFunction implements PairFlatMapFunction<Iterator<Tuple2<ByteArray, List<byte[]>>>,
ByteArray, List<byte[]>> {
private final GroupingOptions options;
private final PGroupedTableType ptype;
private final SparkRuntimeContext ctxt;
private transient RawComparator<?> cmp;
public ReduceGroupingFunction(GroupingOptions options,
PGroupedTableType ptype,
SparkRuntimeContext ctxt) {
this.options = options;
this.ptype = ptype;
this.ctxt = ctxt;
}
@Override
public Iterator<Tuple2<ByteArray, List<byte[]>>> call(
final Iterator<Tuple2<ByteArray, List<byte[]>>> iter) throws Exception {
return new GroupingIterator(iter, rawComparator());
}
private RawComparator<?> rawComparator() {
if (cmp == null) {
try {
Job job = new Job(ctxt.getConfiguration());
ptype.configureShuffle(job, options);
cmp = ReflectionUtils.newInstance(options.getGroupingComparatorClass(), job.getConfiguration());
} catch (IOException e) {
throw new CrunchRuntimeException("Error configuring grouping comparator", e);
}
}
return cmp;
}
private static class GroupingIterator implements Iterator<Tuple2<ByteArray, List<byte[]>>> {
private final Iterator<Tuple2<ByteArray, List<byte[]>>> iter;
private final RawComparator cmp;
private ByteArray key;
private List<byte[]> bytes = Lists.newArrayList();
public GroupingIterator(Iterator<Tuple2<ByteArray, List<byte[]>>> iter, RawComparator cmp) {
this.iter = iter;
this.cmp = cmp;
}
@Override
public boolean hasNext() {
return iter.hasNext() || key != null;
}
@Override
public Tuple2<ByteArray, List<byte[]>> next() {
ByteArray nextKey = null;
List<byte[]> next = null;
while (iter.hasNext()) {
Tuple2<ByteArray, List<byte[]>> t = iter.next();
if (key == null) {
key = t._1();
bytes.addAll(t._2());
} else if (cmp.compare(key.value, 0, key.value.length, t._1().value, 0, t._1().value.length) == 0) {
bytes.addAll(t._2());
} else {
nextKey = t._1();
next = Lists.newArrayList(t._2());
break;
}
}
Tuple2<ByteArray, List<byte[]>> ret = new Tuple2<ByteArray, List<byte[]>>(key, bytes);
key = nextKey;
bytes = next;
return ret;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
}
| 2,246 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/FlatMapIndexFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Iterators;
import org.apache.crunch.DoFn;
import org.apache.crunch.impl.spark.GuavaUtils;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.crunch.util.DoFnIterator;
import org.apache.spark.api.java.function.Function2;
import java.util.Iterator;
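// Applies a Crunch DoFn to an entire partition via mapPartitionsWithIndex; when convertInput
// is set, the incoming Tuple2 elements are first converted to Crunch Pairs, and the partition
// id is handed to the runtime context so the DoFn is initialized with it.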
public class FlatMapIndexFn<S, T> implements Function2<Integer, Iterator, Iterator<T>> {
private final DoFn<S, T> fn;
private final boolean convertInput;
private final SparkRuntimeContext ctxt;
public FlatMapIndexFn(DoFn<S, T> fn, boolean convertInput, SparkRuntimeContext ctxt) {
this.fn = fn;
this.convertInput = convertInput;
this.ctxt = ctxt;
}
@Override
public Iterator<T> call(Integer partitionId, Iterator input) throws Exception {
ctxt.initialize(fn, partitionId);
Iterator in = convertInput ? Iterators.transform(input, GuavaUtils.tuple2PairFunc()) : input;
return new DoFnIterator<S, T>(in, fn);
}
}
| 2,247 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/InputConverterFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.types.Converter;
import org.apache.spark.api.java.function.Function;
import scala.Tuple2;
public class InputConverterFunction<K, V, S> implements Function<Tuple2<K, V>, S> {
private Converter<K, V, S, ?> converter;
public InputConverterFunction(Converter<K, V, S, ?> converter) {
this.converter = converter;
}
@Override
public S call(Tuple2<K, V> kv) throws Exception {
return converter.convertInput(kv._1(), kv._2());
}
}
| 2,248 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/FlatMapPairDoFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Iterators;
import org.apache.crunch.DoFn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.GuavaUtils;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import scala.Tuple2;
import java.util.Iterator;
public class FlatMapPairDoFn<K, V, T> implements FlatMapFunction<Iterator<Tuple2<K, V>>, T> {
private final DoFn<Pair<K, V>, T> fn;
private final SparkRuntimeContext ctxt;
public FlatMapPairDoFn(DoFn<Pair<K, V>, T> fn, SparkRuntimeContext ctxt) {
this.fn = fn;
this.ctxt = ctxt;
}
@Override
public Iterator<T> call(Iterator<Tuple2<K, V>> input) throws Exception {
ctxt.initialize(fn, null);
return new CrunchIterable<Pair<K, V>, T>(fn,
Iterators.transform(input, GuavaUtils.<K, V>tuple2PairFunc())).iterator();
}
}
| 2,249 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/CombineMapsideFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.UnmodifiableIterator;
import org.apache.crunch.CombineFn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.mem.emit.InMemoryEmitter;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import scala.Tuple2;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
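// Runs a CombineFn on the map side: values are cached in memory per key, and the cache is
// re-combined every REDUCE_EVERY_N inputs to keep its memory footprint bounded before the
// remaining entries are flattened back into key/value tuples.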
public class CombineMapsideFunction<K, V> implements PairFlatMapFunction<Iterator<Tuple2<K, V>>, K, V> {
private static final int REDUCE_EVERY_N = 50000;
private final CombineFn<K,V> combineFn;
private final SparkRuntimeContext ctxt;
public CombineMapsideFunction(CombineFn<K, V> combineFn, SparkRuntimeContext ctxt) {
this.combineFn = combineFn;
this.ctxt = ctxt;
}
@Override
public Iterator<Tuple2<K, V>> call(Iterator<Tuple2<K, V>> iter) throws Exception {
ctxt.initialize(combineFn, null);
Map<K, List<V>> cache = Maps.newHashMap();
int cnt = 0;
while (iter.hasNext()) {
Tuple2<K, V> t = iter.next();
List<V> values = cache.get(t._1());
if (values == null) {
values = Lists.newArrayList();
cache.put(t._1(), values);
}
values.add(t._2());
cnt++;
if (cnt % REDUCE_EVERY_N == 0) {
cache = reduce(cache);
}
}
return new Flattener<K, V>(cache).iterator();
}
private Map<K, List<V>> reduce(Map<K, List<V>> cache) {
Set<K> keys = cache.keySet();
Map<K, List<V>> res = Maps.newHashMap();
for (K key : keys) {
for (Pair<K, V> p : reduce(key, cache.get(key))) {
List<V> values = res.get(p.first());
if (values == null) {
values = Lists.newArrayList();
res.put(p.first(), values);
}
values.add(p.second());
}
}
return res;
}
private List<Pair<K, V>> reduce(K key, Iterable<V> values) {
InMemoryEmitter<Pair<K, V>> emitter = new InMemoryEmitter<Pair<K, V>>();
combineFn.process(Pair.of(key, values), emitter);
combineFn.cleanup(emitter);
return emitter.getOutput();
}
private static class Flattener<K, V> implements Iterable<Tuple2<K, V>> {
private final Map<K, List<V>> entries;
public Flattener(Map<K, List<V>> entries) {
this.entries = entries;
}
@Override
public Iterator<Tuple2<K, V>> iterator() {
return new UnmodifiableIterator<Tuple2<K, V>>() {
private Iterator<K> keyIter = entries.keySet().iterator();
private K currentKey;
private Iterator<V> valueIter = null;
@Override
public boolean hasNext() {
while (valueIter == null || !valueIter.hasNext()) {
if (keyIter.hasNext()) {
currentKey = keyIter.next();
valueIter = entries.get(currentKey).iterator();
} else {
return false;
}
}
return true;
}
@Override
public Tuple2<K, V> next() {
return new Tuple2<K, V>(currentKey, valueIter.next());
}
};
}
}
}
| 2,250 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/CrunchPairTuple2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import com.google.common.collect.Iterators;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.GuavaUtils;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import scala.Tuple2;
import java.util.Iterator;
public class CrunchPairTuple2<K, V> implements PairFlatMapFunction<Iterator<Pair<K, V>>, K, V> {
@Override
public Iterator<Tuple2<K, V>> call(final Iterator<Pair<K, V>> iterator) throws Exception {
return Iterators.transform(iterator, GuavaUtils.<K, V>pair2tupleFunc());
}
}
| 2,251 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/Tuple2MapFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
public class Tuple2MapFunction<K, V> implements PairFunction<Pair<K, V>, K, V> {
private final MapFn<Pair<K, V>, Pair<K, V>> fn;
private final SparkRuntimeContext ctxt;
private boolean initialized;
public Tuple2MapFunction(MapFn<Pair<K, V>, Pair<K, V>> fn, SparkRuntimeContext ctxt) {
this.fn = fn;
this.ctxt = ctxt;
}
@Override
public Tuple2<K, V> call(Pair<K, V> p) throws Exception {
if (!initialized) {
ctxt.initialize(fn, null);
initialized = true;
}
Pair<K, V> res = fn.map(p);
return new Tuple2<K, V>(res.first(), res.second());
}
}
| 2,252 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/MapOutputFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.serde.SerDe;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
public class MapOutputFunction<K, V> implements PairFunction<Pair<K, V>, ByteArray, byte[]> {
private final SerDe keySerde;
private final SerDe valueSerde;
public MapOutputFunction(SerDe keySerde, SerDe valueSerde) {
this.keySerde = keySerde;
this.valueSerde = valueSerde;
}
@Override
public Tuple2<ByteArray, byte[]> call(Pair<K, V> p) throws Exception {
return new Tuple2<ByteArray, byte[]>(
keySerde.toBytes(p.first()),
valueSerde.toBytes(p.second()).value);
}
}
| 2,253 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/PairMapIterableFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.List;
public class PairMapIterableFunction<K, V, S, T> implements PairFunction<Pair<K, List<V>>, S, Iterable<T>> {
private final MapFn<Pair<K, List<V>>, Pair<S, Iterable<T>>> fn;
private final SparkRuntimeContext runtimeContext;
private boolean initialized;
public PairMapIterableFunction(
MapFn<Pair<K, List<V>>, Pair<S, Iterable<T>>> fn,
SparkRuntimeContext runtimeContext) {
this.fn = fn;
this.runtimeContext = runtimeContext;
}
@Override
public Tuple2<S, Iterable<T>> call(Pair<K, List<V>> input) throws Exception {
if (!initialized) {
runtimeContext.initialize(fn, null);
initialized = true;
}
Pair<S, Iterable<T>> out = fn.map(input);
return new Tuple2<S, Iterable<T>>(out.first(), out.second());
}
}
| 2,254 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/OutputConverterFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.types.Converter;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
public class OutputConverterFunction<K, V, S> implements PairFunction<S, K, V> {
private Converter<K, V, S, ?> converter;
public OutputConverterFunction(Converter<K, V, S, ?> converter) {
this.converter = converter;
}
@Override
public Tuple2<K, V> call(S s) throws Exception {
return new Tuple2<K, V>(converter.outputKey(s), converter.outputValue(s));
}
}
| 2,255 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/MapFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.MapFn;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.spark.api.java.function.Function;
public class MapFunction implements Function<Object, Object> {
private final MapFn fn;
private final SparkRuntimeContext ctxt;
private boolean initialized;
public MapFunction(MapFn fn, SparkRuntimeContext ctxt) {
this.fn = fn;
this.ctxt = ctxt;
}
@Override
public Object call(Object o) throws Exception {
if (!initialized) {
ctxt.initialize(fn, null);
initialized = true;
}
return fn.map(o);
}
}
| 2,256 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/fn/PartitionedMapOutputFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.spark.IntByteArray;
import org.apache.crunch.impl.spark.SparkRuntimeContext;
import org.apache.crunch.impl.spark.serde.SerDe;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.io.IOException;
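// Serializes each key/value pair for the shuffle, tagging the serialized key with the
// partition id assigned by the Hadoop Partitioner configured for the grouping options, so
// downstream stages can reproduce MapReduce-style partitioning.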
public class PartitionedMapOutputFunction<K, V> implements PairFunction<Pair<K, V>, IntByteArray, byte[]> {
private final SerDe<K> keySerde;
private final SerDe<V> valueSerde;
private final PGroupedTableType<K, V> ptype;
private final int numPartitions;
private final SparkRuntimeContext runtimeContext;
private final GroupingOptions options;
private transient Partitioner partitioner;
public PartitionedMapOutputFunction(
SerDe<K> keySerde,
SerDe<V> valueSerde,
PGroupedTableType<K, V> ptype,
int numPartitions,
GroupingOptions options,
SparkRuntimeContext runtimeContext) {
this.keySerde = keySerde;
this.valueSerde = valueSerde;
this.ptype = ptype;
this.numPartitions = numPartitions;
this.options = options;
this.runtimeContext = runtimeContext;
}
@Override
public Tuple2<IntByteArray, byte[]> call(Pair<K, V> p) throws Exception {
int partition = getPartitioner().getPartition(p.first(), p.second(), numPartitions);
return new Tuple2<IntByteArray, byte[]>(
new IntByteArray(partition, keySerde.toBytes(p.first())),
valueSerde.toBytes(p.second()).value);
}
private Partitioner getPartitioner() {
if (partitioner == null) {
try {
ptype.initialize(runtimeContext.getConfiguration());
Job job = new Job(runtimeContext.getConfiguration());
options.configure(job);
ptype.configureShuffle(job, options);
partitioner = ReflectionUtils.newInstance(options.getPartitionerClass(), job.getConfiguration());
} catch (IOException e) {
throw new CrunchRuntimeException("Error configuring partitioner", e);
}
}
return partitioner;
}
}
| 2,257 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/serde/SerDeFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.serde;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.writable.WritableType;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.hadoop.conf.Configuration;
import java.util.Map;
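// Chooses the byte-level serialization strategy for a PType: Writable-based types use
// WritableSerDe with their serialization class, while everything else is treated as Avro
// and handled by AvroSerDe with mode properties derived from the configuration.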
public class SerDeFactory {
public static SerDe create(PType<?> ptype, Configuration conf) {
if (WritableTypeFamily.getInstance().equals(ptype.getFamily())) {
return new WritableSerDe(((WritableType) ptype).getSerializationClass());
} else {
AvroType at = (AvroType) ptype;
Map<String, String> props = AvroMode.fromType(at).withFactoryFromConfiguration(conf).getModeProperties();
return new AvroSerDe(at, props);
}
}
}
| 2,258 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/serde/AvroSerDe.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.serde;
import com.google.common.base.Function;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.ByteArrayHelper;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.conf.Configuration;
import javax.annotation.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Map;
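// Converts Avro records to and from byte arrays using the DatumWriter/DatumReader supplied
// by the record's AvroMode; the mode, writer, and reader are transient and rebuilt lazily on
// each executor from the serialized modeProperties.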
public class AvroSerDe<T> implements SerDe<T> {
private AvroType<T> avroType;
private Map<String, String> modeProperties;
private ByteArrayHelper helper;
private transient AvroMode mode;
private transient DatumWriter<T> writer;
private transient DatumReader<T> reader;
public AvroSerDe(AvroType<T> avroType, Map<String, String> modeProperties) {
this.avroType = avroType;
this.modeProperties = modeProperties;
if (avroType.hasReflect() && avroType.hasSpecific()) {
Avros.checkCombiningSpecificAndReflectionSchemas();
}
this.helper = ByteArrayHelper.forAvroSchema(avroType.getSchema());
}
private AvroMode getMode() {
if (mode == null) {
mode = AvroMode.fromType(avroType);
if (modeProperties != null && !modeProperties.isEmpty()) {
Configuration conf = new Configuration();
for (Map.Entry<String, String> e : modeProperties.entrySet()) {
conf.set(e.getKey(), e.getValue());
}
mode = mode.withFactoryFromConfiguration(conf);
}
}
return mode;
}
private DatumWriter<T> getWriter() {
if (writer == null) {
writer = getMode().getWriter(avroType.getSchema());
}
return writer;
}
private DatumReader<T> getReader() {
if (reader == null) {
reader = getMode().getReader(avroType.getSchema());
}
return reader;
}
@Override
public ByteArray toBytes(T obj) throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
getWriter().write(obj, encoder);
encoder.flush();
out.close();
return new ByteArray(out.toByteArray(), helper);
}
@Override
public T fromBytes(byte[] bytes) {
Decoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
try {
return getReader().read(null, decoder);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public Function<byte[], T> fromBytesFunction() {
return new Function<byte[], T>() {
@Override
public T apply(@Nullable byte[] input) {
return fromBytes(input);
}
};
}
}
| 2,259 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/serde/SerDe.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.serde;
import com.google.common.base.Function;
import org.apache.crunch.impl.spark.ByteArray;
import java.io.Serializable;
public interface SerDe<T> extends Serializable {
ByteArray toBytes(T obj) throws Exception;
T fromBytes(byte[] bytes);
Function<byte[], T> fromBytesFunction();
}
| 2,260 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/impl/spark/serde/WritableSerDe.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark.serde;
import com.google.common.base.Function;
import org.apache.crunch.impl.spark.ByteArray;
import org.apache.crunch.impl.spark.ByteArrayHelper;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;
import javax.annotation.Nullable;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class WritableSerDe implements SerDe<Writable> {
Class<? extends Writable> clazz;
public WritableSerDe(Class<? extends Writable> clazz) {
this.clazz = clazz;
}
@Override
public ByteArray toBytes(Writable obj) throws Exception {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
obj.write(dos);
dos.close();
return new ByteArray(baos.toByteArray(), ByteArrayHelper.WRITABLES);
}
@Override
public Writable fromBytes(byte[] bytes) {
Writable inst = ReflectionUtils.newInstance(clazz, null);
ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
DataInputStream dis = new DataInputStream(bais);
try {
inst.readFields(dis);
} catch (IOException e) {
throw new RuntimeException(e);
}
return inst;
}
@Override
public Function<byte[], Writable> fromBytesFunction() {
return new Function<byte[], Writable>() {
@Override
public Writable apply(@Nullable byte[] input) {
return fromBytes(input);
}
};
}
}
| 2,261 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SDoubleFlatMapFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Emitter;
import org.apache.spark.api.java.function.DoubleFlatMapFunction;
/**
* A Crunch-compatible abstract base class for Spark's {@link DoubleFlatMapFunction}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SDoubleFlatMapFunction<T> extends SparkDoFn<T, Double>
implements DoubleFlatMapFunction<T> {
@Override
public void process(T input, Emitter<Double> emitter) {
try {
for (Double d : new IterableIterator<Double>(call(input))) {
emitter.emit(d);
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
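// Illustrative sketch (not part of the original source): a hypothetical subclass that splits
// comma-separated text into doubles. Because SDoubleFlatMapFunction is both a Crunch DoFn and
// a Spark DoubleFlatMapFunction, an instance like this could be passed to Crunch's
// PCollection#parallelDo or to Spark's JavaRDD#flatMapToDouble alike.
class SplitDoublesExampleFn extends SDoubleFlatMapFunction<String> {
  @Override
  public java.util.Iterator<Double> call(String line) {
    // Parse each comma-separated token of the input line into a Double.
    java.util.List<Double> parsed = new java.util.ArrayList<Double>();
    for (String token : line.split(",")) {
      parsed.add(Double.valueOf(token.trim()));
    }
    return parsed.iterator();
  }
}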
| 2,262 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/IterableIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import java.util.Iterator;
class IterableIterator<T> implements Iterable<T> {
private final Iterator<T> itr;
IterableIterator(Iterator<T> itr) {
this.itr = itr;
}
public Iterator<T> iterator() { return itr;}
}
| 2,263 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SparkDoFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
abstract class SparkDoFn<T, R> extends DoFn<T, R> {
@Override
public final void initialize() {
// Forced no-op for Spark compatibility
}
@Override
public final void cleanup(Emitter<R> emitter) {
// Forced no-op for Spark compatibility
}
}
| 2,264 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SFlatMapFunction2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.spark.api.java.function.FlatMapFunction2;
/**
* A Crunch-compatible abstract base class for Spark's {@link FlatMapFunction2}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SFlatMapFunction2<K, V, R> extends DoFn<Pair<K, V>, R>
implements FlatMapFunction2<K, V, R> {
@Override
public void process(Pair<K, V> input, Emitter<R> emitter) {
try {
for (R r : new IterableIterator<R>(call(input.first(), input.second()))) {
emitter.emit(r);
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
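// Illustrative sketch (not part of the original source): a hypothetical subclass that expands
// a (word, count) pair into that many copies of the word. As a Crunch DoFn it consumes
// Pair<String, Integer>; as a Spark FlatMapFunction2 it consumes the two components directly.
class ReplicateWordExampleFn extends SFlatMapFunction2<String, Integer, String> {
  @Override
  public java.util.Iterator<String> call(String word, Integer count) {
    // Emit the word once per unit of its count.
    java.util.List<String> copies = new java.util.ArrayList<String>();
    for (int i = 0; i < count; i++) {
      copies.add(word);
    }
    return copies.iterator();
  }
}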
| 2,265 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SPairFlatMapFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import scala.Tuple2;
/**
* A Crunch-compatible abstract base class for Spark's {@link PairFlatMapFunction}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SPairFlatMapFunction<T, K, V> extends SparkDoFn<T, Pair<K, V>>
implements PairFlatMapFunction<T, K, V> {
@Override
public void process(T input, Emitter<Pair<K, V>> emitter) {
try {
for (Tuple2<K, V> kv : new IterableIterator<Tuple2<K, V>>(call(input))) {
emitter.emit(Pair.of(kv._1(), kv._2()));
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
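// Illustrative sketch (not part of the original source): a hypothetical subclass that tokenizes
// a line of text into (word, 1) pairs, usable both as a Crunch DoFn emitting Pair<String, Integer>
// and as a Spark PairFlatMapFunction producing Tuple2<String, Integer>.
class TokenizeExampleFn extends SPairFlatMapFunction<String, String, Integer> {
  @Override
  public java.util.Iterator<Tuple2<String, Integer>> call(String line) {
    // Split on whitespace and pair each word with an initial count of one.
    java.util.List<Tuple2<String, Integer>> pairs = new java.util.ArrayList<Tuple2<String, Integer>>();
    for (String word : line.split("\\s+")) {
      pairs.add(new Tuple2<String, Integer>(word, 1));
    }
    return pairs.iterator();
  }
}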
| 2,266 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SparkMapFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
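/**
 * Base class for the Crunch adapters around Spark's single-value functions. The
 * initialize and cleanup lifecycle methods are sealed as no-ops because Spark
 * function objects carry no per-task setup or teardown of their own.
 */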
abstract class SparkMapFn<T, R> extends MapFn<T, R> {
@Override
public final void initialize() {
// Forced no-op for Spark compatibility
}
@Override
public final void cleanup(Emitter<R> emitter) {
// Forced no-op for Spark compatibility
}
}
| 2,267 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SFunctions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import java.util.Iterator;
import org.apache.spark.api.java.function.DoubleFlatMapFunction;
import org.apache.spark.api.java.function.DoubleFunction;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.FlatMapFunction2;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
/**
* Utility methods for wrapping existing Spark Java API Functions for
* Crunch compatibility.
*/
public final class SFunctions {
public static <T, R> SFunction<T, R> wrap(final Function<T, R> f) {
return new SFunction<T, R>() {
@Override
public R call(T t) throws Exception {
return f.call(t);
}
};
}
public static <K, V, R> SFunction2<K, V, R> wrap(final Function2<K, V, R> f) {
return new SFunction2<K, V, R>() {
@Override
public R call(K k, V v) throws Exception {
return f.call(k, v);
}
};
}
public static <T, K, V> SPairFunction<T, K, V> wrap(final PairFunction<T, K, V> f) {
return new SPairFunction<T, K, V>() {
@Override
public Tuple2<K, V> call(T t) throws Exception {
return f.call(t);
}
};
}
public static <T, R> SFlatMapFunction<T, R> wrap(final FlatMapFunction<T, R> f) {
return new SFlatMapFunction<T, R>() {
@Override
public Iterator<R> call(T t) throws Exception {
return f.call(t);
}
};
}
public static <K, V, R> SFlatMapFunction2<K, V, R> wrap(final FlatMapFunction2<K, V, R> f) {
return new SFlatMapFunction2<K, V, R>() {
@Override
public Iterator<R> call(K k, V v) throws Exception {
return f.call(k, v);
}
};
}
public static <T> SDoubleFunction<T> wrap(final DoubleFunction<T> f) {
return new SDoubleFunction<T>() {
@Override
public double call(T t) throws Exception {
return f.call(t);
}
};
}
public static <T> SDoubleFlatMapFunction<T> wrap(final DoubleFlatMapFunction<T> f) {
return new SDoubleFlatMapFunction<T>() {
@Override
public Iterator<Double> call(T t) throws Exception {
return f.call(t);
}
};
}
private SFunctions() {}
}
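// Illustrative usage sketch (the "urls" collection and UrlHostFn class below are hypothetical).
// wrap(...) adapts a pre-existing Spark function object so it can be reused as a Crunch
// DoFn/MapFn without rewriting it:
//
//   Function<String, String> hostFn = new UrlHostFn();            // existing Spark function
//   SFunction<String, String> crunchReady = SFunctions.wrap(hostFn);
//   // Crunch: PCollection<String> hosts = urls.parallelDo(crunchReady, Writables.strings());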
| 2,268 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SPairFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Pair;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
/**
* A Crunch-compatible abstract base class for Spark's {@link PairFunction}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SPairFunction<T, K, V> extends SparkMapFn<T, Pair<K, V>>
implements PairFunction<T, K, V> {
@Override
public Pair<K, V> map(T input) {
try {
Tuple2<K, V> t = call(input);
return t == null ? null : Pair.of(t._1(), t._2());
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
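// Illustrative usage sketch (the "people" collection below is hypothetical). The Tuple2
// returned by call(...) is converted to a Crunch Pair by map(...) above, so a subclass can
// key a PCollection into a PTable:
//
//   SPairFunction<String, String, String> byFirstLetter =
//       new SPairFunction<String, String, String>() {
//         @Override
//         public scala.Tuple2<String, String> call(String name) {
//           return new scala.Tuple2<String, String>(name.substring(0, 1), name);
//         }
//       };
//   // Crunch: PTable<String, String> keyed =
//   //     people.parallelDo(byFirstLetter, Writables.tableOf(Writables.strings(), Writables.strings()));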
| 2,269 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SFlatMapFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Emitter;
import org.apache.spark.api.java.function.FlatMapFunction;
/**
* A Crunch-compatible abstract base class for Spark's {@link FlatMapFunction}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SFlatMapFunction<T, R> extends SparkDoFn<T, R>
implements FlatMapFunction<T, R> {
@Override
public void process(T input, Emitter<R> emitter) {
try {
for (R r : new IterableIterator<R>(call(input))) {
emitter.emit(r);
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
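// Illustrative usage sketch (the "lines" collection and "lineRdd" RDD below are hypothetical).
// A single subclass works as both a Crunch DoFn and a Spark FlatMapFunction:
//
//   SFlatMapFunction<String, String> splitWords = new SFlatMapFunction<String, String>() {
//     @Override
//     public java.util.Iterator<String> call(String line) {
//       return java.util.Arrays.asList(line.split("\\s+")).iterator();
//     }
//   };
//   // Crunch: PCollection<String> words = lines.parallelDo(splitWords, Writables.strings());
//   // Spark:  JavaRDD<String> words = lineRdd.flatMap(splitWords);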
| 2,270 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.spark.api.java.function.Function;
/**
* A Crunch-compatible abstract base class for Spark's {@link Function}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SFunction<T, R> extends SparkMapFn<T, R> implements Function<T, R> {
@Override
public R map(T input) {
try {
return call(input);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
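// Illustrative usage sketch (the "names" collection and "nameRdd" RDD below are hypothetical):
//
//   SFunction<String, Integer> length = new SFunction<String, Integer>() {
//     @Override
//     public Integer call(String s) {
//       return s.length();
//     }
//   };
//   // Crunch: PCollection<Integer> lengths = names.parallelDo(length, Writables.ints());
//   // Spark:  JavaRDD<Integer> lengths = nameRdd.map(length);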
| 2,271 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SFunction2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Pair;
import org.apache.spark.api.java.function.Function2;
/**
* A Crunch-compatible abstract base class for Spark's {@link Function2}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SFunction2<K, V, R> extends SparkMapFn<Pair<K, V>, R>
implements Function2<K, V, R> {
@Override
public R map(Pair<K, V> input) {
try {
return call(input.first(), input.second());
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
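// Illustrative usage sketch (the "pairs" table and "intRdd" RDD below are hypothetical). On
// the Crunch side the two arguments arrive as a Pair<K, V>, which map(...) above unpacks:
//
//   SFunction2<Integer, Integer, Integer> sum = new SFunction2<Integer, Integer, Integer>() {
//     @Override
//     public Integer call(Integer a, Integer b) {
//       return a + b;
//     }
//   };
//   // Crunch: PCollection<Integer> sums = pairs.parallelDo(sum, Writables.ints());
//   // Spark:  Integer total = intRdd.reduce(sum);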
| 2,272 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/crunch/fn/SDoubleFunction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.spark.api.java.function.DoubleFunction;
/**
* A Crunch-compatible abstract base class for Spark's {@link DoubleFunction}. Subclasses
* of this class may be used against either Crunch {@code PCollections} or Spark {@code RDDs}.
*/
public abstract class SDoubleFunction<T> extends SparkMapFn<T, Double> implements DoubleFunction<T> {
@Override
public Double map(T input) {
try {
return call(input);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
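// Illustrative usage sketch (the "prices" collection and "priceRdd" RDD below are hypothetical):
//
//   SDoubleFunction<String> parse = new SDoubleFunction<String>() {
//     @Override
//     public double call(String s) {
//       return Double.parseDouble(s);
//     }
//   };
//   // Crunch: PCollection<Double> values = prices.parallelDo(parse, Writables.doubles());
//   // Spark:  JavaDoubleRDD values = priceRdd.mapToDouble(parse);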
| 2,273 |
0 |
Create_ds/crunch/crunch-spark/src/main/java/org/apache/hadoop
|
Create_ds/crunch/crunch-spark/src/main/java/org/apache/hadoop/mapred/SparkCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import com.google.common.collect.ImmutableMap;
import org.apache.spark.Accumulator;
import java.util.Map;
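/**
 * A Hadoop Counter implementation that mirrors counter updates into a Spark
 * Accumulator keyed by group and counter name, so that values recorded on
 * Spark executors can be aggregated back on the driver.
 */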
public class SparkCounter extends Counters.Counter {
private String group;
private String name;
private long value = 0;
private Accumulator<Map<String, Map<String, Long>>> accum;
public SparkCounter(String group, String name, Accumulator<Map<String, Map<String, Long>>> accum) {
this.group = group;
this.name = name;
this.accum = accum;
}
public SparkCounter(String group, String name, long value) {
this.group = group;
this.name = name;
this.value = value;
}
@Override
public String getName() {
return name;
}
@Override
public String getDisplayName() {
return name;
}
@Override
public long getValue() {
return value;
}
@Override
public long getCounter() {
return getValue();
}
@Override
public void increment(long inc) {
this.value += inc;
accum.add(ImmutableMap.<String, Map<String, Long>>of(group, ImmutableMap.of(name, inc)));
}
@Override
public void setValue(long newValue) {
long delta = newValue - value;
accum.add(ImmutableMap.<String, Map<String, Long>>of(group, ImmutableMap.of(name, delta)));
this.value = newValue;
}
}
| 2,274 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/types
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/types/orc/TupleObjectInspectorTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.orc;
import static org.junit.Assert.*;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.TupleN;
import org.apache.crunch.types.TupleFactory;
import org.apache.crunch.types.orc.TupleObjectInspector.ByteBufferObjectInspector;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.io.BytesWritable;
import org.junit.Test;
public class TupleObjectInspectorTest {
@Test
public void testTupleObjectInspector() {
// test get
TupleObjectInspector<TupleN> toi = new TupleObjectInspector<TupleN>(TupleFactory.TUPLEN,
Writables.strings(), Writables.ints(), Writables.floats());
TupleN tuple = new TupleN("Alice", 28, 165.2f);
List<Object> values = toi.getStructFieldsDataAsList(tuple);
assertEquals("Alice", values.get(0));
assertEquals(28, values.get(1));
assertEquals(165.2f, values.get(2));
// test create
TupleN newTuple = toi.create("Alice", 28, 165.2f);
assertEquals(tuple, newTuple);
TupleObjectInspector<Pair> poi = new TupleObjectInspector<Pair>(TupleFactory.PAIR,
Writables.strings(), Writables.ints());
Pair pair = poi.create("word", 29);
assertEquals("word", pair.first());
assertEquals(29, pair.second());
}
@Test
public void testByteBufferObjectInspector() {
byte[] bytes = {0, 9, 4, 18, 64, 6, 1};
BytesWritable bw = new BytesWritable(bytes);
ByteBuffer buf = ByteBuffer.wrap(bytes);
ByteBufferObjectInspector bboi = new ByteBufferObjectInspector();
assertArrayEquals(bytes, bboi.getPrimitiveJavaObject(buf));
assertEquals(bw, bboi.getPrimitiveWritableObject(buf));
assertEquals(buf, bboi.create(bytes));
assertEquals(buf, bboi.create(bw));
ByteBuffer newBuf = bboi.copyObject(buf);
assertTrue(buf != newBuf);
assertEquals(buf, newBuf);
}
}
| 2,275 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/types
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/types/orc/OrcsTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.orc;
import static org.junit.Assert.*;
import java.sql.Timestamp;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.crunch.Pair;
import org.apache.crunch.TupleN;
import org.apache.crunch.io.orc.OrcWritable;
import org.apache.crunch.test.orc.pojos.AddressBook;
import org.apache.crunch.test.orc.pojos.Person;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.hive.common.type.HiveVarchar;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.junit.Test;
public class OrcsTest {
@SuppressWarnings({ "unchecked", "rawtypes" })
protected static void testInputOutputFn(PType ptype, Object java, OrcWritable orc) {
initialize(ptype);
assertEquals(java, ptype.getInputMapFn().map(orc));
assertEquals(orc, ptype.getOutputMapFn().map(java));
}
private static void initialize(PType ptype) {
ptype.getInputMapFn().initialize();
ptype.getOutputMapFn().initialize();
}
@Test
public void testOrcs() {
String mapValueTypeStr = "struct<a:string,b:int>";
String typeStr = "struct<a:int,b:string,c:float,d:varchar(64)"
+ ",e:map<string," + mapValueTypeStr + ">>";
TypeInfo mapValueTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(mapValueTypeStr);
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
PType<OrcStruct> ptype = Orcs.orcs(typeInfo);
HiveVarchar varchar = new HiveVarchar("Hello World", 32);
Map<Text, OrcStruct> map = new HashMap<Text, OrcStruct>();
OrcStruct value = OrcUtils.createOrcStruct(mapValueTypeInfo, new Text("age"), new IntWritable(24));
map.put(new Text("Bob"), value);
OrcStruct s = OrcUtils.createOrcStruct(typeInfo, new IntWritable(1024), new Text("Alice"),
null, new HiveVarcharWritable(varchar), map);
OrcWritable w = new OrcWritable();
w.set(s);
testInputOutputFn(ptype, s, w);
}
@Test
public void testReflects() {
PType<AddressBook> ptype = Orcs.reflects(AddressBook.class);
AddressBook ab = new AddressBook();
ab.setMyName("John Smith");
ab.setMyNumbers(Arrays.asList("919-333-4452", "650-777-4329"));
Map<String, Person> contacts = new HashMap<String, Person>();
contacts.put("Alice", new Person("Alice", 23, Arrays.asList("666-677-9999")));
contacts.put("Bob", new Person("Bob", 26, Arrays.asList("999-888-1132", "000-222-9934")));
contacts.put("David", null);
ab.setContacts(contacts);
Timestamp now = new Timestamp(System.currentTimeMillis());
ab.setUpdateTime(now);
byte[] signature = {0, 0, 64, 68, 39, 0};
ab.setSignature(signature);
Map<Text, OrcStruct> map = new HashMap<Text, OrcStruct>();
map.put(new Text("Alice"), OrcUtils.createOrcStruct(Person.TYPE_INFO, new Text("Alice"), new IntWritable(23),
Arrays.asList(new Text("666-677-9999"))));
map.put(new Text("Bob"), OrcUtils.createOrcStruct(Person.TYPE_INFO, new Text("Bob"), new IntWritable(26),
Arrays.asList(new Text("999-888-1132"), new Text("000-222-9934"))));
map.put(new Text("David"), null);
OrcStruct s = OrcUtils.createOrcStruct(AddressBook.TYPE_INFO, new Text("John Smith"),
Arrays.asList(new Text("919-333-4452"), new Text("650-777-4329")), map, new TimestampWritable(now),
new BytesWritable(signature));
OrcWritable w = new OrcWritable();
w.set(s);
testInputOutputFn(ptype, ab, w);
}
@Test
public void testTuples() {
PType<TupleN> ptype = Orcs.tuples(Writables.ints(), Writables.strings(), Orcs.reflects(Person.class),
Writables.tableOf(Writables.strings(), Orcs.reflects(Person.class)));
TupleN t = new TupleN(1, "John Smith", new Person("Alice", 23, Arrays.asList("666-677-9999")),
new Pair<String, Person>("Bob", new Person("Bob", 26, Arrays.asList("999-888-1132", "000-222-9934"))));
String typeStr = "struct<a:int,b:string,c:" + Person.TYPE_STR + ",d:struct<d1:string,d2:" + Person.TYPE_STR + ">>";
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
String tableTypeStr = "struct<a:string,b:" + Person.TYPE_STR + ">";
TypeInfo tableTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(tableTypeStr);
OrcStruct s = OrcUtils.createOrcStruct(typeInfo, new IntWritable(1), new Text("John Smith"),
OrcUtils.createOrcStruct(Person.TYPE_INFO, new Text("Alice"), new IntWritable(23),
Arrays.asList(new Text("666-677-9999"))
),
OrcUtils.createOrcStruct(tableTypeInfo, new Text("Bob"),
OrcUtils.createOrcStruct(Person.TYPE_INFO, new Text("Bob"), new IntWritable(26),
Arrays.asList(new Text("999-888-1132"), new Text("000-222-9934"))
)
)
);
OrcWritable w = new OrcWritable();
w.set(s);
testInputOutputFn(ptype, t, w);
}
}
| 2,276 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/test/orc
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/test/orc/pojos/AddressBook.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.test.orc.pojos;
import java.sql.Timestamp;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
public class AddressBook {
public static final String TYPE_STR = "struct<myname:string,mynumbers:array<string>,"
+ "contacts:map<string," + Person.TYPE_STR + ">,updatetime:timestamp,signature:binary>";
public static final TypeInfo TYPE_INFO = TypeInfoUtils.getTypeInfoFromTypeString(TYPE_STR);
private String myName;
private List<String> myNumbers;
private Map<String, Person> contacts;
private Timestamp updateTime;
private byte[] signature;
public AddressBook() {}
public AddressBook(String myName, List<String> myNumbers,
Map<String, Person> contacts, Timestamp updateTime, byte[] signature) {
super();
this.myName = myName;
this.myNumbers = myNumbers;
this.contacts = contacts;
this.updateTime = updateTime;
this.signature = signature;
}
public String getMyName() {
return myName;
}
public void setMyName(String myName) {
this.myName = myName;
}
public List<String> getMyNumbers() {
return myNumbers;
}
public void setMyNumbers(List<String> myNumbers) {
this.myNumbers = myNumbers;
}
public Map<String, Person> getContacts() {
return contacts;
}
public void setContacts(Map<String, Person> contacts) {
this.contacts = contacts;
}
public Timestamp getUpdateTime() {
return updateTime;
}
public void setUpdateTime(Timestamp updateTime) {
this.updateTime = updateTime;
}
public byte[] getSignature() {
return signature;
}
public void setSignature(byte[] signature) {
this.signature = signature;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((contacts == null) ? 0 : contacts.hashCode());
result = prime * result + ((myName == null) ? 0 : myName.hashCode());
result = prime * result + ((myNumbers == null) ? 0 : myNumbers.hashCode());
result = prime * result + Arrays.hashCode(signature);
result = prime * result
+ ((updateTime == null) ? 0 : updateTime.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
AddressBook other = (AddressBook) obj;
if (contacts == null) {
if (other.contacts != null)
return false;
} else if (!contacts.equals(other.contacts))
return false;
if (myName == null) {
if (other.myName != null)
return false;
} else if (!myName.equals(other.myName))
return false;
if (myNumbers == null) {
if (other.myNumbers != null)
return false;
} else if (!myNumbers.equals(other.myNumbers))
return false;
if (!Arrays.equals(signature, other.signature))
return false;
if (updateTime == null) {
if (other.updateTime != null)
return false;
} else if (!updateTime.equals(other.updateTime))
return false;
return true;
}
}
| 2,277 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/test/orc
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/test/orc/pojos/Person.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.test.orc.pojos;
import java.util.List;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
public class Person {
public static final String TYPE_STR = "struct<name:string,age:int,number:array<string>>";
public static final TypeInfo TYPE_INFO = TypeInfoUtils.getTypeInfoFromTypeString(TYPE_STR);
private String name;
private int age;
private List<String> numbers;
public Person() {}
public Person(String name, int age, List<String> numbers) {
super();
this.name = name;
this.age = age;
this.numbers = numbers;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
public List<String> getNumbers() {
return numbers;
}
public void setNumbers(List<String> numbers) {
this.numbers = numbers;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + age;
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + ((numbers == null) ? 0 : numbers.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Person other = (Person) obj;
if (age != other.age)
return false;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
if (numbers == null) {
if (other.numbers != null)
return false;
} else if (!numbers.equals(other.numbers))
return false;
return true;
}
}
| 2,278 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io/orc/OrcFileTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import com.google.common.io.Files;
public class OrcFileTest {
protected transient Configuration conf;
protected transient FileSystem fs;
protected transient Path tempPath;
@Before
public void setUp() throws IOException {
conf = new Configuration();
tempPath = new Path(Files.createTempDir().getAbsolutePath());
fs = tempPath.getFileSystem(conf);
}
@After
public void tearDown() throws IOException {
fs.delete(tempPath, true);
}
}
| 2,279 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io/orc/OrcFileReaderWriterTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import org.apache.crunch.io.orc.OrcFileSource;
import org.apache.crunch.io.orc.OrcFileWriter;
import org.apache.crunch.test.orc.pojos.Person;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.orc.Orcs;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class OrcFileReaderWriterTest extends OrcFileTest {
@Test
public void testReadWrite() throws IOException {
Path path = new Path(tempPath, "test.orc");
PType<Person> ptype = Orcs.reflects(Person.class);
OrcFileWriter<Person> writer = new OrcFileWriter<Person>(conf, path, ptype);
Person p1 = new Person("Alice", 23, Arrays.asList("666-677-9999"));
Person p2 = new Person("Bob", 26, null);
writer.write(p1);
writer.write(p2);
writer.close();
OrcFileSource<Person> reader = new OrcFileSource<Person>(path, ptype);
Iterator<Person> iter = reader.read(conf).iterator();
assertEquals(p1, iter.next());
assertEquals(p2, iter.next());
}
}
| 2,280 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io/orc/OrcFileReaderFactoryTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.orc.OrcUtils;
import org.apache.crunch.types.orc.Orcs;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.junit.Test;
public class OrcFileReaderFactoryTest extends OrcFileTest {
@Test
public void testColumnPruning() throws IOException {
Path path = new Path(tempPath, "test.orc");
String typeStr = "struct<a:int,b:string,c:float>";
TypeInfo info = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
StructObjectInspector soi = (StructObjectInspector) OrcStruct.createObjectInspector(info);
PType<OrcStruct> ptype = Orcs.orcs(info);
OrcFileWriter<OrcStruct> writer = new OrcFileWriter<OrcStruct>(conf, path, ptype);
writer.write(OrcUtils.createOrcStruct(info, new IntWritable(1), new Text("Alice"), new FloatWritable(167.2f)));
writer.write(OrcUtils.createOrcStruct(info, new IntWritable(2), new Text("Bob"), new FloatWritable(179.7f)));
writer.close();
int[] readColumns = {1};
OrcFileSource<OrcStruct> source = new OrcFileSource<OrcStruct>(path, ptype, readColumns);
for (OrcStruct row : source.read(conf)) {
List<Object> list = soi.getStructFieldsDataAsList(row);
assertNull(list.get(0));
assertNotNull(list.get(1));
assertNull(list.get(2));
}
}
}
| 2,281 |
0 |
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/test/java/org/apache/crunch/io/orc/OrcWritableTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import static org.junit.Assert.*;
import java.util.List;
import org.apache.crunch.types.orc.OrcUtils;
import org.apache.crunch.types.writable.WritableDeepCopier;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.junit.Test;
public class OrcWritableTest {
@Test
public void testDeepCopy() {
String typeStr = "struct<a:int,b:string,c:float>";
TypeInfo info = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
StructObjectInspector oi = (StructObjectInspector) OrcStruct.createObjectInspector(info);
BinarySortableSerDe serde = OrcUtils.createBinarySerde(info);
OrcStruct struct = OrcUtils.createOrcStruct(info,
new IntWritable(1), new Text("Alice"), new FloatWritable(165.3f));
OrcWritable writable = new OrcWritable();
writable.set(struct);
assertTrue(struct == writable.get());
writable.setObjectInspector(oi);
writable.setSerde(serde);
WritableDeepCopier<OrcWritable> deepCopier = new WritableDeepCopier<OrcWritable>(OrcWritable.class);
OrcWritable copied = deepCopier.deepCopy(writable);
assertTrue(writable != copied);
assertEquals(writable, copied);
copied.setObjectInspector(oi);
copied.setSerde(serde);
OrcStruct copiedStruct = copied.get();
assertTrue(struct != copiedStruct);
assertEquals(struct, copiedStruct);
List<Object> items = oi.getStructFieldsDataAsList(struct);
List<Object> copiedItems = oi.getStructFieldsDataAsList(copiedStruct);
for (int i = 0; i < items.size(); i++) {
assertTrue(items.get(i) != copiedItems.get(i));
assertEquals(items.get(i), copiedItems.get(i));
}
OrcWritable copied2 = deepCopier.deepCopy(copied);
assertTrue(copied2 != copied);
assertEquals(copied2, copied);
copied2.setObjectInspector(oi);
copied2.setSerde(serde);
OrcStruct copiedStruct2 = copied2.get();
assertTrue(copiedStruct2 != copiedStruct);
assertEquals(copiedStruct2, copiedStruct);
List<Object> copiedItems2 = oi.getStructFieldsDataAsList(copiedStruct2);
for (int i = 0; i < items.size(); i++) {
assertTrue(copiedItems2.get(i) != copiedItems.get(i));
assertEquals(copiedItems2.get(i), copiedItems.get(i));
}
}
@Test
public void testCompareTo() {
String typeStr = "struct<a:int,b:string,c:float>";
TypeInfo info = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
StructObjectInspector oi = (StructObjectInspector) OrcStruct.createObjectInspector(info);
BinarySortableSerDe serde = OrcUtils.createBinarySerde(info);
OrcStruct struct1 = OrcUtils.createOrcStruct(info, new IntWritable(1), new Text("AAA"), new FloatWritable(3.2f));
OrcStruct struct2 = OrcUtils.createOrcStruct(info, new IntWritable(1), new Text("AAB"), null);
OrcStruct struct3 = OrcUtils.createOrcStruct(info, new IntWritable(2), new Text("AAA"), null);
OrcStruct struct4 = OrcUtils.createOrcStruct(info, new IntWritable(2), new Text("AAA"), new FloatWritable(3.2f));
OrcWritable writable1 = new OrcWritable();
writable1.set(struct1);
OrcWritable writable2 = new OrcWritable();
writable2.set(struct2);
OrcWritable writable3 = new OrcWritable();
writable3.set(struct3);
OrcWritable writable4 = new OrcWritable();
writable4.set(struct4);
writable1.setObjectInspector(oi);
writable2.setObjectInspector(oi);
writable3.setObjectInspector(oi);
writable4.setObjectInspector(oi);
writable1.setSerde(serde);
writable2.setSerde(serde);
writable3.setSerde(serde);
writable4.setSerde(serde);
assertTrue(writable1.compareTo(writable2) < 0);
assertTrue(writable2.compareTo(writable3) < 0);
assertTrue(writable1.compareTo(writable3) < 0);
assertTrue(writable3.compareTo(writable4) < 0);
}
}
| 2,282 |
0 |
Create_ds/crunch/crunch-hive/src/it/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/it/java/org/apache/crunch/io/orc/OrcFileSourceTargetIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import static org.junit.Assert.*;
import java.io.IOException;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.TupleN;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.orc.OrcFileSource;
import org.apache.crunch.io.orc.OrcFileTarget;
import org.apache.crunch.io.orc.OrcFileWriter;
import org.apache.crunch.test.orc.pojos.Person;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.orc.OrcUtils;
import org.apache.crunch.types.orc.Orcs;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import com.google.common.collect.Lists;
public class OrcFileSourceTargetIT extends OrcFileTest implements Serializable {
private void generateInputData() throws IOException {
String typeStr = "struct<name:string,age:int,numbers:array<string>>";
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
OrcStruct s = OrcUtils.createOrcStruct(typeInfo, new Text("Alice"), new IntWritable(23),
Arrays.asList(new Text("919-342-5555"), new Text("650-333-2913")));
OrcFileWriter<OrcStruct> writer = new OrcFileWriter<OrcStruct>(conf, new Path(tempPath, "input.orc"), Orcs.orcs(typeInfo));
writer.write(s);
writer.close();
}
private <T> void testSourceTarget(PType<T> ptype, T expected) {
Path inputPath = new Path(tempPath, "input.orc");
Path outputPath = new Path(tempPath, "output");
Pipeline pipeline = new MRPipeline(OrcFileSourceTargetIT.class, conf);
OrcFileSource<T> source = new OrcFileSource<T>(inputPath, ptype);
PCollection<T> rows = pipeline.read(source);
List<T> result = Lists.newArrayList(rows.materialize());
assertEquals(Lists.newArrayList(expected), result);
OrcFileTarget target = new OrcFileTarget(outputPath);
pipeline.write(rows, target);
assertTrue(pipeline.done().succeeded());
OrcFileReaderFactory<T> reader = new OrcFileReaderFactory<T>(ptype);
List<T> newResult = Lists.newArrayList(reader.read(fs, inputPath));
assertEquals(Lists.newArrayList(expected), newResult);
}
@Test
public void testOrcs() throws IOException {
generateInputData();
String typeStr = "struct<name:string,age:int,numbers:array<string>>";
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
OrcStruct expected = OrcUtils.createOrcStruct(typeInfo, new Text("Alice"), new IntWritable(23),
Arrays.asList(new Text("919-342-5555"), new Text("650-333-2913")));
testSourceTarget(Orcs.orcs(typeInfo), expected);
}
@Test
public void testReflects() throws IOException {
generateInputData();
Person expected = new Person("Alice", 23, Arrays.asList("919-342-5555", "650-333-2913"));
testSourceTarget(Orcs.reflects(Person.class), expected);
}
@Test
public void testTuples() throws IOException {
generateInputData();
TupleN expected = new TupleN("Alice", 23, Arrays.asList("919-342-5555", "650-333-2913"));
testSourceTarget(Orcs.tuples(Writables.strings(), Writables.ints(), Writables.collections(Writables.strings())),
expected);
}
@Test
public void testColumnPruning() throws IOException {
generateInputData();
Pipeline pipeline = new MRPipeline(OrcFileSourceTargetIT.class, conf);
int[] readColumns = {0, 1};
OrcFileSource<Person> source = new OrcFileSource<Person>(new Path(tempPath, "input.orc"),
Orcs.reflects(Person.class), readColumns);
PCollection<Person> rows = pipeline.read(source);
List<Person> result = Lists.newArrayList(rows.materialize());
Person expected = new Person("Alice", 23, null);
assertEquals(Lists.newArrayList(expected), result);
}
@Test
public void testGrouping() throws IOException {
String typeStr = "struct<name:string,age:int,numbers:array<string>>";
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeStr);
OrcStruct s1 = OrcUtils.createOrcStruct(typeInfo, new Text("Bob"), new IntWritable(28), null);
OrcStruct s2 = OrcUtils.createOrcStruct(typeInfo, new Text("Bob"), new IntWritable(28), null);
OrcStruct s3 = OrcUtils.createOrcStruct(typeInfo, new Text("Alice"), new IntWritable(23),
Arrays.asList(new Text("444-333-9999")));
OrcStruct s4 = OrcUtils.createOrcStruct(typeInfo, new Text("Alice"), new IntWritable(36),
Arrays.asList(new Text("919-342-5555"), new Text("650-333-2913")));
Path inputPath = new Path(tempPath, "input.orc");
OrcFileWriter<OrcStruct> writer = new OrcFileWriter<OrcStruct>(conf, inputPath, Orcs.orcs(typeInfo));
writer.write(s1);
writer.write(s2);
writer.write(s3);
writer.write(s4);
writer.close();
Pipeline pipeline = new MRPipeline(OrcFileSourceTargetIT.class, conf);
OrcFileSource<Person> source = new OrcFileSource<Person>(inputPath, Orcs.reflects(Person.class));
PCollection<Person> rows = pipeline.read(source);
PTable<Person, Long> count = rows.count();
List<Pair<Person, Long>> result = Lists.newArrayList(count.materialize());
List<Pair<Person, Long>> expected = Lists.newArrayList(
Pair.of(new Person("Alice", 23, Arrays.asList("444-333-9999")), 1L),
Pair.of(new Person("Alice", 36, Arrays.asList("919-342-5555", "650-333-2913")), 1L),
Pair.of(new Person("Bob", 28, null), 2L));
assertEquals(expected, result);
}
}
| 2,283 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/types
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/types/orc/Orcs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.orc;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import org.apache.crunch.MapFn;
import org.apache.crunch.Tuple;
import org.apache.crunch.TupleN;
import org.apache.crunch.fn.CompositeMapFn;
import org.apache.crunch.io.orc.OrcWritable;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.TupleFactory;
import org.apache.crunch.types.writable.WritableType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
import org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
/**
 * Utilities for creating PTypes that handle ORC serialization and deserialization.
*/
public class Orcs {
/**
   * Create a PType that uses OrcStruct directly as the deserialized format. This
   * is the fastest way to serialize and deserialize ORC data, but users need to
   * work with the resulting OrcStruct values through ObjectInspectors. Currently,
   * void and union types are not supported.
   *
   * @param typeInfo the Hive TypeInfo describing the struct layout
   * @return a PType for reading and writing OrcStruct values
*/
public static final PType<OrcStruct> orcs(TypeInfo typeInfo) {
return Writables.derived(OrcStruct.class, new OrcInFn(typeInfo), new OrcOutFn(typeInfo),
Writables.writables(OrcWritable.class));
}
/**
   * Create a PType which uses reflection to serialize and deserialize Java POJOs
   * to and from ORC. The POJO has some restrictions: 1) it must have a default,
   * no-arg constructor; 2) all of its fields must be Hive primitive types or
   * collection types that have Hive equivalents; 3) void and union types are
   * not supported yet.
   *
   * @param clazz the POJO class to map to an ORC struct
   * @return a PType for reading and writing instances of the given class
*/
public static final <T> PType<T> reflects(Class<T> clazz) {
TypeInfo reflectInfo = createReflectTypeInfo(clazz);
return Writables.derived(clazz, new ReflectInFn<T>(clazz),
new ReflectOutFn<T>(clazz), orcs(reflectInfo));
}
private static TypeInfo createReflectTypeInfo(Class<?> clazz) {
ObjectInspector reflectOi = ObjectInspectorFactory
.getReflectionObjectInspector(clazz, ObjectInspectorOptions.JAVA);
return TypeInfoUtils.getTypeInfoFromObjectInspector(reflectOi);
}
/**
   * Create a tuple-based PType. Other Crunch PTypes (such as Writables.ints(),
   * Orcs.reflects(), Writables.pairs(), ...) can be used to construct the
   * component types. Currently, nulls and unions are not supported.
   *
   * @param ptypes the PTypes of the tuple's components, in order
   * @return a PType for reading and writing TupleN values
*/
public static final PType<TupleN> tuples(PType... ptypes) {
TypeInfo tupleInfo = createTupleTypeInfo(ptypes);
return derived(TupleN.class, new TupleInFn<TupleN>(TupleFactory.TUPLEN, ptypes),
new TupleOutFn<TupleN>(ptypes), orcs(tupleInfo), ptypes);
}
// derived, but override subtypes
static <S, T> PType<T> derived(Class<T> clazz, MapFn<S, T> inputFn, MapFn<T, S> outputFn,
PType<S> base, PType[] subTypes) {
WritableType<S, ?> wt = (WritableType<S, ?>) base;
MapFn input = new CompositeMapFn(wt.getInputMapFn(), inputFn);
MapFn output = new CompositeMapFn(outputFn, wt.getOutputMapFn());
return new WritableType(clazz, wt.getSerializationClass(), input, output, subTypes);
}
private static TypeInfo createTupleTypeInfo(PType... ptypes) {
ObjectInspector tupleOi = new TupleObjectInspector(null, ptypes);
return TypeInfoUtils.getTypeInfoFromObjectInspector(tupleOi);
}
private static class OrcInFn extends MapFn<OrcWritable, OrcStruct> {
private TypeInfo typeInfo;
private transient ObjectInspector oi;
private transient BinarySortableSerDe serde;
public OrcInFn(TypeInfo typeInfo) {
this.typeInfo = typeInfo;
}
@Override
public void initialize() {
oi = OrcStruct.createObjectInspector(typeInfo);
serde = OrcUtils.createBinarySerde(typeInfo);
}
@Override
public OrcStruct map(OrcWritable input) {
input.setObjectInspector(oi);
input.setSerde(serde);
return input.get();
}
}
private static class OrcOutFn extends MapFn<OrcStruct, OrcWritable> {
private TypeInfo typeInfo;
private transient ObjectInspector oi;
private transient BinarySortableSerDe serde;
public OrcOutFn(TypeInfo typeInfo) {
this.typeInfo = typeInfo;
}
@Override
public void initialize() {
oi = OrcStruct.createObjectInspector(typeInfo);
serde = OrcUtils.createBinarySerde(typeInfo);
}
@Override
public OrcWritable map(OrcStruct input) {
OrcWritable output = new OrcWritable();
output.setObjectInspector(oi);
output.setSerde(serde);
output.set(input);
return output;
}
}
private static Map<Class<?>, Field[]> fieldsCache = new HashMap<Class<?>, Field[]>();
private static class ReflectInFn<T> extends MapFn<OrcStruct, T> {
private Class<T> typeClass;
private TypeInfo typeInfo;
private transient ObjectInspector reflectOi;
private transient ObjectInspector orcOi;
@Override
public void initialize() {
reflectOi = ObjectInspectorFactory
.getReflectionObjectInspector(typeClass, ObjectInspectorOptions.JAVA);
orcOi = OrcStruct.createObjectInspector(typeInfo);
}
public ReflectInFn(Class<T> typeClass) {
this.typeClass = typeClass;
typeInfo = createReflectTypeInfo(typeClass);
}
@Override
public T map(OrcStruct input) {
return (T) OrcUtils.convert(input, orcOi, reflectOi);
}
}
private static class ReflectOutFn<T> extends MapFn<T, OrcStruct> {
private Class<T> typeClass;
private TypeInfo typeInfo;
private transient ObjectInspector reflectOi;
private transient SettableStructObjectInspector orcOi;
@Override
public void initialize() {
reflectOi = ObjectInspectorFactory.getReflectionObjectInspector(typeClass,
ObjectInspectorOptions.JAVA);
orcOi = (SettableStructObjectInspector) OrcStruct.createObjectInspector(typeInfo);
}
public ReflectOutFn(Class<T> typeClass) {
this.typeClass = typeClass;
typeInfo = createReflectTypeInfo(typeClass);
}
@Override
public OrcStruct map(T input) {
return (OrcStruct) OrcUtils.convert(input, reflectOi, orcOi);
}
}
private static class TupleInFn<T extends Tuple> extends MapFn<OrcStruct, T> {
private PType[] ptypes;
private TupleFactory<T> tupleFactory;
private transient ObjectInspector tupleOi;
private transient ObjectInspector orcOi;
public TupleInFn(TupleFactory<T> tupleFactory, PType... ptypes) {
this.tupleFactory = tupleFactory;
this.ptypes = ptypes;
}
@Override
public void initialize() {
tupleOi = new TupleObjectInspector<T>(tupleFactory, ptypes);
TypeInfo info = TypeInfoUtils.getTypeInfoFromObjectInspector(tupleOi);
orcOi = OrcStruct.createObjectInspector(info);
}
@Override
public T map(OrcStruct input) {
return (T) OrcUtils.convert(input, orcOi, tupleOi);
}
}
private static class TupleOutFn<T extends Tuple> extends MapFn<T, OrcStruct> {
private PType[] ptypes;
private transient ObjectInspector tupleOi;
private transient ObjectInspector orcOi;
public TupleOutFn(PType... ptypes) {
this.ptypes = ptypes;
}
@Override
public void initialize() {
tupleOi = new TupleObjectInspector<T>(null, ptypes);
TypeInfo info = TypeInfoUtils.getTypeInfoFromObjectInspector(tupleOi);
orcOi = OrcStruct.createObjectInspector(info);
}
@Override
public OrcStruct map(T input) {
return (OrcStruct) OrcUtils.convert(input, tupleOi, orcOi);
}
}
}
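// Illustrative usage sketch (the pipeline, paths, and Person POJO reference below are
// hypothetical). The three factory methods pair naturally with OrcFileSource / OrcFileTarget:
//
//   PType<Person> ptype = Orcs.reflects(Person.class);
//   PCollection<Person> people = pipeline.read(new OrcFileSource<Person>(inputPath, ptype));
//   pipeline.write(people, new OrcFileTarget(outputPath));
//
//   // Alternatively, work with raw OrcStructs:
//   //   TypeInfo info = TypeInfoUtils.getTypeInfoFromTypeString("struct<name:string,age:int>");
//   //   PType<OrcStruct> structType = Orcs.orcs(info);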
| 2,284 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/types
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/types/orc/TupleObjectInspector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.orc;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.crunch.Tuple;
import org.apache.crunch.Union;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.TupleFactory;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveJavaObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBinaryObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.BytesWritable;
/**
 * A Hive StructObjectInspector that exposes the structure of Crunch Tuples.
*/
public class TupleObjectInspector<T extends Tuple> extends StructObjectInspector {
private TupleFactory<T> tupleFactory;
private List<TupleField> fields;
public TupleObjectInspector(TupleFactory<T> tupleFactory, PType... ptypes) {
this.tupleFactory = tupleFactory;
fields = new ArrayList<TupleField>();
for (int i = 0; i < ptypes.length; i++) {
TupleField field = new TupleField(i, ptypes[i]);
fields.add(field);
}
}
static class TupleField implements StructField {
private int index;
private ObjectInspector oi;
public TupleField(int index, PType<?> ptype) {
this.index = index;
oi = createObjectInspector(ptype);
}
private ObjectInspector createObjectInspector(PType<?> ptype) {
Class typeClass = ptype.getTypeClass();
if (typeClass == Union.class || typeClass == Void.class) {
throw new IllegalArgumentException(typeClass.getName() + " is not supported yet");
}
ObjectInspector result;
if (typeClass == ByteBuffer.class) {
result = new ByteBufferObjectInspector();
} else if (typeClass == Collection.class) {
ObjectInspector itemOi = createObjectInspector(ptype.getSubTypes().get(0));
result = ObjectInspectorFactory.getStandardListObjectInspector(itemOi);
} else if (typeClass == Map.class) {
ObjectInspector keyOi = ObjectInspectorFactory
.getReflectionObjectInspector(String.class, ObjectInspectorOptions.JAVA);
ObjectInspector valueOi = createObjectInspector(ptype.getSubTypes().get(0));
result = ObjectInspectorFactory.getStandardMapObjectInspector(keyOi, valueOi);
} else if (Tuple.class.isAssignableFrom(typeClass)) {
result = new TupleObjectInspector(TupleFactory.getTupleFactory(typeClass),
ptype.getSubTypes().toArray(new PType[0]));
} else {
result = ObjectInspectorFactory.getReflectionObjectInspector(typeClass,
ObjectInspectorOptions.JAVA);
}
return result;
}
@Override
public String getFieldName() {
return "_col" + index;
}
@Override
public ObjectInspector getFieldObjectInspector() {
return oi;
}
@Override
public int getFieldID() {
return index;
}
@Override
public String getFieldComment() {
return null;
}
}
@Override
public String getTypeName() {
StringBuilder buffer = new StringBuilder();
buffer.append("struct<");
for (int i = 0; i < fields.size(); ++i) {
StructField field = fields.get(i);
if (i != 0) {
buffer.append(",");
}
buffer.append(field.getFieldName());
buffer.append(":");
buffer.append(field.getFieldObjectInspector().getTypeName());
}
buffer.append(">");
return buffer.toString();
}
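// For example, a TupleObjectInspector built over an int PType and a string PType reports
// the type name "struct<_col0:int,_col1:string>", using the positional "_col<i>" field
// names generated by TupleField.getFieldName().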
@Override
public Category getCategory() {
return Category.STRUCT;
}
public T create(Object... values) {
return tupleFactory.makeTuple(values);
}
@Override
public List<? extends StructField> getAllStructFieldRefs() {
return fields;
}
@Override
public StructField getStructFieldRef(String fieldName) {
for (StructField field : fields) {
if (field.getFieldName().equals(fieldName)) {
return field;
}
}
return null;
}
@Override
public Object getStructFieldData(Object data, StructField fieldRef) {
TupleField field = (TupleField) fieldRef;
return ((T) data).get(field.index);
}
@Override
public List<Object> getStructFieldsDataAsList(Object data) {
T tuple = (T) data;
List<Object> result = new ArrayList<Object>();
for (int i = 0; i < tuple.size(); i++) {
result.add(tuple.get(i));
}
return result;
}
static class ByteBufferObjectInspector extends AbstractPrimitiveJavaObjectInspector implements SettableBinaryObjectInspector {
ByteBufferObjectInspector() {
super(TypeInfoFactory.binaryTypeInfo);
}
@Override
public ByteBuffer copyObject(Object o) {
if (o == null) {
return null;
}
byte[] oldBytes = getPrimitiveJavaObject(o);
byte[] copiedBytes = new byte[oldBytes.length];
System.arraycopy(oldBytes, 0, copiedBytes, 0, oldBytes.length);
ByteBuffer duplicate = ByteBuffer.wrap(copiedBytes);
return duplicate;
}
@Override
public BytesWritable getPrimitiveWritableObject(Object o) {
if (o == null) {
return null;
}
ByteBuffer buf = (ByteBuffer) o;
BytesWritable bw = new BytesWritable();
bw.set(buf.array(), buf.arrayOffset(), buf.limit());
return bw;
}
@Override
public byte[] getPrimitiveJavaObject(Object o) {
if (o == null) {
return null;
}
ByteBuffer buf = (ByteBuffer) o;
byte[] b = new byte[buf.limit()];
System.arraycopy(buf.array(), buf.arrayOffset(), b, 0, b.length);
return b;
}
@Override
public Object set(Object o, byte[] b) {
throw new UnsupportedOperationException("set is not supported");
}
@Override
public Object set(Object o, BytesWritable bw) {
throw new UnsupportedOperationException("set is not supported");
}
@Override
public ByteBuffer create(byte[] bb) {
return bb == null ? null : ByteBuffer.wrap(bb);
}
@Override
public ByteBuffer create(BytesWritable bw) {
return bw == null ? null : ByteBuffer.wrap(bw.getBytes());
}
}
}
| 2,285 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/types
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/types/orc/OrcUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.types.orc;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveVarchar;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableMapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBinaryObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableBooleanObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableByteObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableDateObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableDoubleObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableFloatObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveCharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableHiveVarcharObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableIntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableLongObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableShortObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableStringObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.SettableTimestampObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
public class OrcUtils {
/**
* Generates TypeInfo for a given Java class based on reflection.
*
* @param typeClass the Java class to inspect
* @return the Hive TypeInfo describing the class
*/
public static TypeInfo getTypeInfo(Class<?> typeClass) {
ObjectInspector oi = ObjectInspectorFactory
.getReflectionObjectInspector(typeClass, ObjectInspectorOptions.JAVA);
return TypeInfoUtils.getTypeInfoFromObjectInspector(oi);
}
/**
* Creates an OrcStruct from a struct TypeInfo and the field values, given in field order.
*
* @param typeInfo the struct TypeInfo describing the ORC schema
* @param objs the field values, one per struct field
* @return the populated OrcStruct
*/
public static OrcStruct createOrcStruct(TypeInfo typeInfo, Object... objs) {
SettableStructObjectInspector oi = (SettableStructObjectInspector) OrcStruct
.createObjectInspector(typeInfo);
List<StructField> fields = (List<StructField>) oi.getAllStructFieldRefs();
OrcStruct result = (OrcStruct) oi.create();
result.setNumFields(fields.size());
for (int i = 0; i < fields.size(); i++) {
oi.setStructFieldData(result, fields.get(i), objs[i]);
}
return result;
}
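// A minimal usage sketch (hypothetical column names and values; ORC's settable struct
// inspector stores the values as given, so Writable field values are assumed here):
//
//   TypeInfo typeInfo =
//       TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
//   OrcStruct struct =
//       OrcUtils.createOrcStruct(typeInfo, new IntWritable(1), new Text("alice"));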
/**
* Creates a binary-sortable serde for OrcStruct serialization/deserialization.
*
* @param typeInfo the struct TypeInfo describing the columns
* @return an initialized BinarySortableSerDe for that struct
*/
public static BinarySortableSerDe createBinarySerde(TypeInfo typeInfo){
BinarySortableSerDe serde = new BinarySortableSerDe();
StringBuffer nameSb = new StringBuffer();
StringBuffer typeSb = new StringBuffer();
StructTypeInfo sti = (StructTypeInfo) typeInfo;
for (String name : sti.getAllStructFieldNames()) {
nameSb.append(name);
nameSb.append(',');
}
for (TypeInfo info : sti.getAllStructFieldTypeInfos()) {
typeSb.append(info.toString());
typeSb.append(',');
}
Properties tbl = new Properties();
String names = nameSb.length() > 0 ? nameSb.substring(0,
nameSb.length() - 1) : "";
String types = typeSb.length() > 0 ? typeSb.substring(0,
typeSb.length() - 1) : "";
tbl.setProperty(serdeConstants.LIST_COLUMNS, names);
tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, types);
try {
serde.initialize(null, tbl);
} catch (SerDeException e) {
throw new CrunchRuntimeException("Unable to initialize binary serde", e);
}
return serde;
}
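// A minimal usage sketch (the struct type string is hypothetical): create one serde per
// struct TypeInfo and hand it to OrcWritable.setSerde(...) so records can be serialized
// and compared in binary-sortable form.
//
//   TypeInfo typeInfo =
//       TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
//   BinarySortableSerDe serde = OrcUtils.createBinarySerde(typeInfo);
//   writable.setSerde(serde);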
/**
* Converts an object between representations (for example from or to OrcStruct) as
* described by the given object inspectors.
*
* @param from the source object
* @param fromOi the object inspector describing the source object
* @param toOi the (settable) object inspector describing the target representation
* @return the converted object, or null if the source is null
*/
public static Object convert(Object from, ObjectInspector fromOi, ObjectInspector toOi) {
if (from == null) {
return null;
}
Object to;
switch (fromOi.getCategory()) {
case PRIMITIVE:
PrimitiveObjectInspector fromPoi = (PrimitiveObjectInspector) fromOi;
switch (fromPoi.getPrimitiveCategory()) {
case FLOAT:
SettableFloatObjectInspector floatOi = (SettableFloatObjectInspector) toOi;
return floatOi.create((Float) fromPoi.getPrimitiveJavaObject(from));
case DOUBLE:
SettableDoubleObjectInspector doubleOi = (SettableDoubleObjectInspector) toOi;
return doubleOi.create((Double) fromPoi.getPrimitiveJavaObject(from));
case BOOLEAN:
SettableBooleanObjectInspector boolOi = (SettableBooleanObjectInspector) toOi;
return boolOi.create((Boolean) fromPoi.getPrimitiveJavaObject(from));
case INT:
SettableIntObjectInspector intOi = (SettableIntObjectInspector) toOi;
return intOi.create((Integer) fromPoi.getPrimitiveJavaObject(from));
case LONG:
SettableLongObjectInspector longOi = (SettableLongObjectInspector) toOi;
return longOi.create((Long) fromPoi.getPrimitiveJavaObject(from));
case STRING:
SettableStringObjectInspector strOi = (SettableStringObjectInspector) toOi;
return strOi.create((String) fromPoi.getPrimitiveJavaObject(from));
case BYTE:
SettableByteObjectInspector byteOi = (SettableByteObjectInspector) toOi;
return byteOi.create((Byte) fromPoi.getPrimitiveJavaObject(from));
case SHORT:
SettableShortObjectInspector shortOi = (SettableShortObjectInspector) toOi;
return shortOi.create((Short) fromPoi.getPrimitiveJavaObject(from));
case BINARY:
SettableBinaryObjectInspector binOi = (SettableBinaryObjectInspector) toOi;
return binOi.create((byte[]) fromPoi.getPrimitiveJavaObject(from));
case TIMESTAMP:
SettableTimestampObjectInspector timeOi = (SettableTimestampObjectInspector) toOi;
return timeOi.create((Timestamp) fromPoi.getPrimitiveJavaObject(from));
case DATE:
SettableDateObjectInspector dateOi = (SettableDateObjectInspector) toOi;
return dateOi.create((Date) fromPoi.getPrimitiveJavaObject(from));
case DECIMAL:
SettableHiveDecimalObjectInspector decimalOi = (SettableHiveDecimalObjectInspector) toOi;
return decimalOi.create((HiveDecimal) fromPoi.getPrimitiveJavaObject(from));
case CHAR:
SettableHiveCharObjectInspector charOi = (SettableHiveCharObjectInspector) toOi;
return charOi.create((HiveChar) fromPoi.getPrimitiveJavaObject(from));
case VARCHAR:
SettableHiveVarcharObjectInspector varcharOi = (SettableHiveVarcharObjectInspector) toOi;
return varcharOi.create((HiveVarchar) fromPoi.getPrimitiveJavaObject(from));
case VOID:
throw new IllegalArgumentException("Void type is not supported yet");
default:
throw new IllegalArgumentException("Unknown primitive type "
+ (fromPoi).getPrimitiveCategory());
}
case STRUCT:
StructObjectInspector fromSoi = (StructObjectInspector) fromOi;
List<StructField> fromFields = (List<StructField>) fromSoi.getAllStructFieldRefs();
List<Object> fromItems = fromSoi.getStructFieldsDataAsList(from);
// this is a tuple. use TupleObjectInspector to construct the result
if (toOi instanceof TupleObjectInspector) {
TupleObjectInspector toToi = (TupleObjectInspector) toOi;
List<StructField> toFields = (List<StructField>) toToi.getAllStructFieldRefs();
Object[] values = new Object[fromItems.size()];
for (int i = 0; i < fromItems.size(); i++) {
values[i] = convert(fromItems.get(i),
fromFields.get(i).getFieldObjectInspector(),
toFields.get(i).getFieldObjectInspector());
}
return toToi.create(values);
}
SettableStructObjectInspector toSoi = (SettableStructObjectInspector) toOi;
List<StructField> toFields = (List<StructField>) toSoi.getAllStructFieldRefs();
to = toSoi.create();
for (int i = 0; i < fromItems.size(); i++) {
Object converted = convert(fromItems.get(i),
fromFields.get(i).getFieldObjectInspector(),
toFields.get(i).getFieldObjectInspector());
toSoi.setStructFieldData(to, toFields.get(i), converted);
}
return to;
case MAP:
MapObjectInspector fromMoi = (MapObjectInspector) fromOi;
SettableMapObjectInspector toMoi = (SettableMapObjectInspector) toOi;
to = toMoi.create(); // do not reuse
for (Map.Entry<?, ?> entry : fromMoi.getMap(from).entrySet()) {
Object convertedKey = convert(entry.getKey(),
fromMoi.getMapKeyObjectInspector(),
toMoi.getMapKeyObjectInspector());
Object convertedValue = convert(entry.getValue(),
fromMoi.getMapValueObjectInspector(),
toMoi.getMapValueObjectInspector());
toMoi.put(to, convertedKey, convertedValue);
}
return to;
case LIST:
ListObjectInspector fromLoi = (ListObjectInspector) fromOi;
List<?> fromList = fromLoi.getList(from);
SettableListObjectInspector toLoi = (SettableListObjectInspector) toOi;
to = toLoi.create(fromList.size()); // do not reuse
for (int i = 0; i < fromList.size(); i++) {
Object converted = convert(fromList.get(i),
fromLoi.getListElementObjectInspector(),
toLoi.getListElementObjectInspector());
toLoi.set(to, i, converted);
}
return to;
case UNION:
throw new IllegalArgumentException("Union type is not supported yet");
default:
throw new IllegalArgumentException("Unknown type " + fromOi.getCategory());
}
}
}
| 2,286 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcCrunchOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.io.IOException;
import org.apache.hadoop.hive.ql.io.orc.OrcNewOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class OrcCrunchOutputFormat extends FileOutputFormat<NullWritable, OrcWritable> {
private OrcNewOutputFormat outputFormat = new OrcNewOutputFormat();
@Override
public RecordWriter<NullWritable, OrcWritable> getRecordWriter(
TaskAttemptContext job) throws IOException, InterruptedException {
RecordWriter<NullWritable, Writable> writer = outputFormat.getRecordWriter(job);
return new OrcCrunchRecordWriter(writer);
}
static class OrcCrunchRecordWriter extends RecordWriter<NullWritable, OrcWritable> {
private final RecordWriter<NullWritable, Writable> writer;
private final OrcSerde orcSerde;
OrcCrunchRecordWriter(RecordWriter<NullWritable, Writable> writer) {
this.writer = writer;
this.orcSerde = new OrcSerde();
}
@Override
public void write(NullWritable key, OrcWritable value) throws IOException,
InterruptedException {
if (value.get() == null) {
throw new NullPointerException("Cannot write null records to orc file");
}
writer.write(key, orcSerde.serialize(value.get(), value.getObjectInspector()));
}
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
writer.close(context);
}
}
}
| 2,287 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcFileSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
public class OrcFileSourceTarget<T> extends ReadableSourcePathTargetImpl<T> {
public OrcFileSourceTarget(Path path, PType<T> ptype) {
this(path, ptype, SequentialFileNamingScheme.getInstance());
}
public OrcFileSourceTarget(Path path, PType<T> ptype, FileNamingScheme fileNameScheme) {
this(new OrcFileSource<T>(path, ptype), new OrcFileTarget(path), fileNameScheme);
}
public OrcFileSourceTarget(ReadableSource<T> source, PathTarget target,
FileNamingScheme fileNamingScheme) {
super(source, target, fileNamingScheme);
}
@Override
public String toString() {
return target.toString();
}
}
| 2,288 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcFileTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
public class OrcFileTarget extends FileTargetImpl {
public OrcFileTarget(String path) {
this(new Path(path));
}
public OrcFileTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public OrcFileTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, OrcCrunchOutputFormat.class, fileNamingScheme);
}
@Override
public String toString() {
return "Orc(" + path.toString() + ")";
}
@Override
public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) {
return new OrcFileSourceTarget<T>(path, ptype).fileSystem(getFileSystem());
}
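// A minimal usage sketch (hypothetical output path and PCollection): write ORC-typed
// records to a directory of ORC files.
//
//   people.write(new OrcFileTarget(new Path("/out/people-orc")));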
}
| 2,289 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.ReadableData;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
public class OrcFileSource<T> extends FileSourceImpl<T> implements ReadableSource<T> {
private int[] readColumns;
public static final String HIVE_READ_ALL_COLUMNS = "hive.io.file.read.all.columns";
private static <S> FormatBundle<OrcCrunchInputFormat> getBundle(int[] readColumns) {
FormatBundle<OrcCrunchInputFormat> fb = FormatBundle.forInput(OrcCrunchInputFormat.class);
if (readColumns != null) { // setting configurations for column pruning
fb.set(HIVE_READ_ALL_COLUMNS, "false");
fb.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, getColumnIdsStr(readColumns));
}
return fb;
}
static String getColumnIdsStr(int[] columns) {
StringBuilder sb = new StringBuilder();
for (int c : columns) {
sb.append(c);
sb.append(',');
}
return sb.length() > 0 ? sb.substring(0, sb.length() - 1) : "";
}
public OrcFileSource(Path path, PType<T> ptype) {
this(path, ptype, null);
}
/**
* Constructor for the column pruning optimization.
*
* @param path the path of the ORC data to read
* @param ptype the PType of the records
* @param readColumns indexes of the columns which will be read
*/
public OrcFileSource(Path path, PType<T> ptype, int[] readColumns) {
super(path, ptype, getBundle(readColumns));
this.readColumns = readColumns;
}
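// A minimal usage sketch (hypothetical Person class and path, assuming an ORC PType such
// as Orcs.reflects(Person.class)): read only columns 0 and 2 of each row via column pruning.
//
//   OrcFileSource<Person> source = new OrcFileSource<Person>(
//       new Path("/data/people"), Orcs.reflects(Person.class), new int[] { 0, 2 });
//   PCollection<Person> people = pipeline.read(source);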
public OrcFileSource(List<Path> paths, PType<T> ptype) {
this(paths, ptype, null);
}
/**
* Constructor for the column pruning optimization.
*
* @param paths the paths of the ORC data to read
* @param ptype the PType of the records
* @param columns indexes of the columns which will be read
*/
public OrcFileSource(List<Path> paths, PType<T> ptype, int[] columns) {
super(paths, ptype, getBundle(columns));
this.readColumns = columns; // remember the pruned columns so read() and asReadable() honor them
}
@Override
public String toString() {
return "Orc(" + pathsAsString() + ")";
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, new OrcFileReaderFactory<T>(ptype, readColumns));
}
@Override
public ReadableData<T> asReadable() {
return new OrcReadableData<T>(this.paths, ptype, readColumns);
}
}
| 2,290 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.util.List;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.ReadableDataImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
public class OrcReadableData<T> extends ReadableDataImpl<T> {
private final PType<T> ptype;
private final int[] readColumns;
public OrcReadableData(List<Path> paths, PType<T> ptype) {
this(paths, ptype, null);
}
public OrcReadableData(List<Path> paths, PType<T> ptype, int[] readColumns) {
super(paths);
this.ptype = ptype;
this.readColumns = readColumns;
}
@Override
public FileReaderFactory<T> getFileReaderFactory() {
return new OrcFileReaderFactory<T>(ptype, readColumns);
}
}
| 2,291 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.types.orc.OrcUtils;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparable;
public class OrcWritable implements WritableComparable<OrcWritable> {
private OrcStruct orc;
private ObjectInspector oi; // object inspector for orc struct
private BytesWritable blob; // serialized from orc struct
private BinarySortableSerDe serde;
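// An OrcWritable lazily keeps either the deserialized OrcStruct, the serialized blob, or
// both: serialize() and makeOrcStruct() convert between the two on demand, and each setter
// invalidates the representation that has become stale.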
@Override
public void write(DataOutput out) throws IOException {
serialize();
blob.write(out);
}
private void serialize() {
try {
if (blob == null) {
// Make a copy since BinarySortableSerDe will reuse the byte buffer.
// This is not very efficient for the current implementation. Shall we
// implement a no-reuse version of BinarySortableSerDe?
byte[] bytes = ((BytesWritable) serde.serialize(orc, oi)).getBytes();
byte[] newBytes = new byte[bytes.length];
System.arraycopy(bytes, 0, newBytes, 0, bytes.length);
blob = new BytesWritable(newBytes);
}
} catch (SerDeException e) {
throw new CrunchRuntimeException("Unable to serialize object: " + orc, e);
}
}
@Override
public void readFields(DataInput in) throws IOException {
blob = new BytesWritable();
blob.readFields(in);
orc = null; // the orc struct is stale
}
@Override
public int compareTo(OrcWritable arg0) {
serialize();
arg0.serialize();
return ((Comparable) blob).compareTo((Comparable) arg0.blob);
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
return compareTo((OrcWritable) obj) == 0;
}
@Override
public int hashCode() {
return blob == null ? 0 : blob.hashCode();
}
public void setSerde(BinarySortableSerDe serde) {
this.serde = serde;
}
public void setObjectInspector(ObjectInspector oi) {
this.oi = oi;
}
public ObjectInspector getObjectInspector() {
return oi;
}
public void set(OrcStruct orcStruct) {
this.orc = orcStruct;
blob = null; // the blob is stale
}
public OrcStruct get() {
if (orc == null && blob != null) {
makeOrcStruct();
}
return orc;
}
private void makeOrcStruct() {
try {
Object row = serde.deserialize(blob);
StructObjectInspector rowOi = (StructObjectInspector) serde.getObjectInspector();
orc = (OrcStruct) OrcUtils.convert(row, rowOi, oi);
} catch (SerDeException e) {
throw new CrunchRuntimeException("Unable to deserialize blob: " + blob, e);
}
}
}
| 2,292 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcCrunchInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hive.ql.io.orc.OrcNewInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
public class OrcCrunchInputFormat extends InputFormat<NullWritable, OrcWritable> {
private OrcNewInputFormat inputFormat = new OrcNewInputFormat();
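// This input format delegates split computation and record reading to Hive's
// OrcNewInputFormat and simply wraps each OrcStruct value in an OrcWritable.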
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException,
InterruptedException {
return inputFormat.getSplits(context);
}
@Override
public RecordReader<NullWritable, OrcWritable> createRecordReader(
InputSplit split, TaskAttemptContext context) throws IOException,
InterruptedException {
RecordReader<NullWritable, OrcStruct> reader = inputFormat.createRecordReader(
split, context);
return new OrcCrunchRecordReader(reader);
}
static class OrcCrunchRecordReader extends RecordReader<NullWritable, OrcWritable> {
private final RecordReader<NullWritable, OrcStruct> reader;
private OrcWritable value = new OrcWritable();
OrcCrunchRecordReader(RecordReader<NullWritable, OrcStruct> reader) {
this.reader = reader;
}
@Override
public void close() throws IOException {
reader.close();
}
@Override
public NullWritable getCurrentKey() throws IOException,
InterruptedException {
return NullWritable.get();
}
@Override
public OrcWritable getCurrentValue() throws IOException, InterruptedException {
return value;
}
@Override
public float getProgress() throws IOException, InterruptedException {
return reader.getProgress();
}
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
boolean hasNext = reader.nextKeyValue();
if (hasNext) {
value.set(reader.getCurrentValue());
}
return hasNext;
}
}
}
| 2,293 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcFileWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.io.Closeable;
import java.io.IOException;
import org.apache.crunch.MapFn;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
/**
* A writer class corresponding to OrcFileReaderFactory, mainly used for test purposes.
*
* @param <T> the record type being written
*/
public class OrcFileWriter<T> implements Closeable {
private RecordWriter<NullWritable, Object> writer;
private MapFn<T, Object> mapFn;
private final OrcSerde serde;
static class NullProgress implements Progressable {
@Override
public void progress() {
}
}
public OrcFileWriter(Configuration conf, Path path, PType<T> pType) throws IOException {
JobConf jobConf = new JobConf(conf);
OutputFormat outputFormat = new OrcOutputFormat();
writer = outputFormat.getRecordWriter(null, jobConf, path.toString(), new NullProgress());
mapFn = pType.getOutputMapFn();
mapFn.initialize();
serde = new OrcSerde();
}
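// A minimal usage sketch (hypothetical Person class and path, assuming an ORC PType such
// as Orcs.reflects(Person.class)): write a few records and close the file.
//
//   OrcFileWriter<Person> writer = new OrcFileWriter<Person>(
//       conf, new Path("/tmp/people.orc"), Orcs.reflects(Person.class));
//   writer.write(new Person("alice", 30));
//   writer.close();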
public void write(T t) throws IOException {
OrcWritable ow = (OrcWritable) mapFn.map(t);
if (ow.get() == null) {
throw new NullPointerException("Cannot write null records to orc file");
}
writer.write(NullWritable.get(), serde.serialize(ow.get(), ow.getObjectInspector()));
}
@Override
public void close() throws IOException {
writer.close(Reporter.NULL);
}
}
| 2,294 |
0 |
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-hive/src/main/java/org/apache/crunch/io/orc/OrcFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.orc;
import java.util.Iterator;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.MapFn;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import com.google.common.collect.UnmodifiableIterator;
public class OrcFileReaderFactory<T> implements FileReaderFactory<T> {
private MapFn<Object, T> inputFn;
private OrcInputFormat inputFormat = new OrcInputFormat();
private int[] readColumns;
public OrcFileReaderFactory(PType<T> ptype) {
this(ptype, null);
}
public OrcFileReaderFactory(PType<T> ptype, int[] readColumns) {
inputFn = ptype.getInputMapFn();
this.readColumns = readColumns;
}
@Override
public Iterator<T> read(FileSystem fs, final Path path) {
try {
if (!fs.isFile(path)) {
throw new CrunchRuntimeException("Not a file: " + path);
}
inputFn.initialize();
FileStatus status = fs.getFileStatus(path);
FileSplit split = new FileSplit(path, 0, status.getLen(), new String[0]);
JobConf conf = new JobConf();
if (readColumns != null) {
conf.setBoolean(OrcFileSource.HIVE_READ_ALL_COLUMNS, false);
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, OrcFileSource.getColumnIdsStr(readColumns));
}
final RecordReader<NullWritable, OrcStruct> reader = inputFormat.getRecordReader(split, conf, Reporter.NULL);
return new UnmodifiableIterator<T>() {
private boolean checked = false;
private boolean hasNext;
private OrcStruct value;
private OrcWritable writable = new OrcWritable();
@Override
public boolean hasNext() {
try {
if (value == null) {
value = reader.createValue();
}
if (!checked) {
hasNext = reader.next(NullWritable.get(), value);
checked = true;
}
return hasNext;
} catch (Exception e) {
throw new CrunchRuntimeException("Error while reading local file: " + path, e);
}
}
@Override
public T next() {
try {
if (value == null) {
value = reader.createValue();
}
if (!checked) {
reader.next(NullWritable.get(), value);
}
checked = false;
writable.set(value);
return inputFn.map(writable);
} catch (Exception e) {
throw new CrunchRuntimeException("Error while reading local file: " + path, e);
}
}
};
} catch (Exception e) {
throw new CrunchRuntimeException("Error while reading local file: " + path, e);
}
}
}
| 2,295 |
0 |
Create_ds/crunch/crunch-contrib/src/test/java/org/apache/crunch/contrib
|
Create_ds/crunch/crunch-contrib/src/test/java/org/apache/crunch/contrib/text/ParseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.contrib.text;
import static org.apache.crunch.contrib.text.Extractors.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.Collection;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.types.avro.Avros;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
public class ParseTest {
@Test
public void testInt() {
assertEquals(Integer.valueOf(1729), xint().extract("1729"));
assertEquals(Integer.valueOf(321), xint(321).extract("foo"));
}
@Test
public void testString() {
assertEquals("bar", xstring().extract("bar"));
}
@Test
public void testPairWithDrop() {
TokenizerFactory sf = TokenizerFactory.builder().delimiter(",").drop(0, 2).build();
assertEquals(Pair.of(1, "abc"), xpair(sf, xint(), xstring()).extract("foo,1,17.29,abc"));
}
@Test
public void testTripsWithSkip() {
TokenizerFactory sf = TokenizerFactory.builder().delimiter(";").skip("^foo").build();
assertEquals(Tuple3.of(17, "abc", 3.4f),
xtriple(sf, xint(), xstring(), xfloat()).extract("foo17;abc;3.4"));
}
@Test
public void testTripsWithKeep() {
TokenizerFactory sf = TokenizerFactory.builder().delimiter(";").keep(1, 2, 3).build();
assertEquals(Tuple3.of(17, "abc", 3.4f),
xtriple(sf, xint(), xstring(), xfloat()).extract("foo;17;abc;3.4"));
}
@Test
public void testQuadsWithWhitespace() {
TokenizerFactory sf = TokenizerFactory.getDefaultInstance();
assertEquals(Tuple4.of(1.3, "foo", true, 1L),
xquad(sf, xdouble(), xstring(), xboolean(), xlong()).extract("1.3 foo true 1"));
}
@Test
public void testTupleN() {
TokenizerFactory sf = TokenizerFactory.builder().delimiter(",").build();
assertEquals(new TupleN(1, false, true, 2, 3),
xtupleN(sf, xint(), xboolean(), xboolean(), xint(), xint()).extract("1,false,true,2,3"));
}
@Test
public void testCollections() {
TokenizerFactory sf = TokenizerFactory.builder().delimiter(";").build();
// Use 3000 as the default for values we can't parse
Extractor<Collection<Integer>> x = xcollect(sf, xint(3000));
assertEquals(ImmutableList.of(1, 2, 3), x.extract("1;2;3"));
assertFalse(x.errorOnLastRecord());
assertEquals(ImmutableList.of(17, 29, 3000), x.extract("17;29;a"));
assertTrue(x.errorOnLastRecord());
assertEquals(1, x.getStats().getErrorCount());
}
@Test
public void testNestedComposites() {
TokenizerFactory outer = TokenizerFactory.builder().delimiter(";").build();
TokenizerFactory inner = TokenizerFactory.builder().delimiter(",").build();
Extractor<Pair<Pair<Long, Integer>, Tuple3<String, Integer, Float>>> extractor =
xpair(outer, xpair(inner, xlong(), xint()), xtriple(inner, xstring(), xint(), xfloat()));
assertEquals(Pair.of(Pair.of(1L, 2), Tuple3.of("a", 17, 29.0f)),
extractor.extract("1,2;a,17,29"));
}
@Test
public void testParse() {
TokenizerFactory sf = TokenizerFactory.builder().delimiter(",").build();
PCollection<String> lines = MemPipeline.typedCollectionOf(Avros.strings(), "1,3.0");
Iterable<Pair<Integer, Float>> it = Parse.parse("test", lines,
xpair(sf, xint(), xfloat())).materialize();
assertEquals(ImmutableList.of(Pair.of(1, 3.0f)), it);
}
}
| 2,296 |
0 |
Create_ds/crunch/crunch-contrib/src/it/java/org/apache/crunch/contrib/io
|
Create_ds/crunch/crunch-contrib/src/it/java/org/apache/crunch/contrib/io/jdbc/DataBaseSourceIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.contrib.io.jdbc;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.Serializable;
import java.util.List;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.test.CrunchTestSupport;
import org.apache.crunch.types.writable.Writables;
import org.h2.tools.RunScript;
import org.h2.tools.Server;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
public class DataBaseSourceIT extends CrunchTestSupport implements Serializable {
private transient Server server;
@Before
public void start() throws Exception {
File file = tempDir.copyResourceFile("data.script");
server = Server.createTcpServer().start();
RunScript.execute("jdbc:h2:file:/tmp/test", "sa", "", file.getAbsolutePath(), "utf-8", false);
}
@After
public void stop() throws Exception {
server.stop();
new File("/tmp/test.h2.db").delete();
}
@Test
public void testReadFromSource() throws Exception {
Pipeline pipeline = new MRPipeline(DataBaseSourceIT.class);
DataBaseSource<IdentifiableName> dbsrc = new DataBaseSource.Builder<IdentifiableName>(IdentifiableName.class)
.setDriverClass(org.h2.Driver.class)
.setUrl("jdbc:h2:file:/tmp/test").setUsername("sa").setPassword("")
.selectSQLQuery("SELECT ID, NAME FROM TEST").countSQLQuery("select count(*) from Test").build();
PCollection<IdentifiableName> cdidata = pipeline.read(dbsrc);
PCollection<String> names = cdidata.parallelDo(new DoFn<IdentifiableName, String>() {
@Override
public void process(IdentifiableName input, Emitter<String> emitter) {
emitter.emit(input.name.toString());
}
}, Writables.strings());
List<String> nameList = Lists.newArrayList(names.materialize());
pipeline.done();
assertEquals(2, nameList.size());
assertEquals(Sets.newHashSet("Hello", "World"), Sets.newHashSet(nameList));
}
}
| 2,297 |
0 |
Create_ds/crunch/crunch-contrib/src/it/java/org/apache/crunch/contrib
|
Create_ds/crunch/crunch-contrib/src/it/java/org/apache/crunch/contrib/bloomfilter/BloomFiltersIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.contrib.bloomfilter;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.crunch.test.CrunchTestSupport;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.junit.Test;
public class BloomFiltersIT extends CrunchTestSupport implements Serializable {
@Test
public void testFilterCreation() throws IOException {
String inputPath = tempDir.copyResourceFileName("shakes.txt");
BloomFilterFn<String> filterFn = new BloomFilterFn<String>() {
@Override
public Collection<Key> generateKeys(String input) {
List<String> parts = Arrays.asList(StringUtils.split(input, " "));
Collection<Key> keys = new HashSet<Key>();
for (String stringpart : parts) {
keys.add(new Key(stringpart.getBytes(Charset.forName("UTF-8"))));
}
return keys;
}
};
Map<String, BloomFilter> filterValues = BloomFilterFactory.createFilter(new Path(inputPath), filterFn).getValue();
assertEquals(1, filterValues.size());
BloomFilter filter = filterValues.get("shakes.txt");
assertTrue(filter.membershipTest(new Key("Mcbeth".getBytes(Charset.forName("UTF-8")))));
assertTrue(filter.membershipTest(new Key("apples".getBytes(Charset.forName("UTF-8")))));
}
}
| 2,298 |
0 |
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* User contributions that may be interesting for special applications.
*
* Things included in this package or its subpackages are maintained
* by the Crunch team, but are too specialized to include them in the
* core library.
*/
package org.apache.crunch.contrib;
| 2,299 |