orc-main/java/core/src/java/org/apache/orc/impl/BufferChunk.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.common.io.DiskRange;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import java.nio.ByteBuffer;
/**
* The sections of the stripe that we have read.
* This might not match diskRange: one disk range can map to multiple buffer
* chunks, depending on DFS block boundaries.
*/
public class BufferChunk extends DiskRangeList {
private ByteBuffer chunk;
public BufferChunk(long offset, int length) {
super(offset, offset + length);
chunk = null;
}
public BufferChunk(ByteBuffer chunk, long offset) {
super(offset, offset + chunk.remaining());
this.chunk = chunk;
}
public void setChunk(ByteBuffer chunk) {
this.chunk = chunk;
}
@Override
public boolean hasData() {
return chunk != null;
}
@Override
public final String toString() {
if (chunk == null) {
return "data range[" + offset + ", " + end +")";
} else {
boolean makesSense = chunk.remaining() == (end - offset);
return "data range [" + offset + ", " + end + "), size: " + chunk.remaining()
+ (makesSense ? "" : "(!)") + " type: " +
(chunk.isDirect() ? "direct" : "array-backed");
}
}
@Override
public DiskRange sliceAndShift(long offset, long end, long shiftBy) {
assert offset <= end && offset >= this.offset && end <= this.end;
assert offset + shiftBy >= 0;
ByteBuffer sliceBuf = chunk.slice();
int newPos = (int) (offset - this.offset);
int newLimit = newPos + (int) (end - offset);
try {
sliceBuf.position(newPos);
sliceBuf.limit(newLimit);
} catch (Throwable t) {
throw new RuntimeException(
"Failed to slice buffer chunk with range" + " [" + this.offset + ", " + this.end
+ "), position: " + chunk.position() + " limit: " + chunk.limit() + ", "
+ (chunk.isDirect() ? "direct" : "array") + "; to [" + offset + ", " + end + ") "
+ t.getClass(), t);
}
return new BufferChunk(sliceBuf, offset + shiftBy);
}
@Override
public boolean equals(Object other) {
if (other == null || other.getClass() != getClass()) {
return false;
}
BufferChunk ob = (BufferChunk) other;
return chunk.equals(ob.chunk);
}
@Override
public int hashCode() {
return chunk.hashCode();
}
@Override
public ByteBuffer getData() {
return chunk;
}
}
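// Illustrative usage sketch (not part of the original file): a BufferChunk is typically
// created for a planned read range and only later filled with data. The offset, length,
// and buffer below are hypothetical.
class BufferChunkExample {
static BufferChunk planAndFill() {
BufferChunk chunk = new BufferChunk(1024L, 256);    // covers [1024, 1280), no data yet
assert !chunk.hasData();
chunk.setChunk(java.nio.ByteBuffer.allocate(256));  // attach the bytes once they are read
assert chunk.hasData();
return chunk;
}
}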
orc-main/java/core/src/java/org/apache/orc/impl/BufferChunkList.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
/**
* Builds a list of buffer chunks
*/
public class BufferChunkList {
private BufferChunk head;
private BufferChunk tail;
public void add(BufferChunk value) {
if (head == null) {
head = value;
tail = value;
} else {
tail.next = value;
value.prev = tail;
value.next = null;
tail = value;
}
}
public BufferChunk get() {
return head;
}
/**
* Get the nth element of the list
* @param chunk the element number to get, counting from 0
* @return the chunk at that position, or null if the list has fewer elements
*/
public BufferChunk get(int chunk) {
BufferChunk ptr = head;
for(int i=0; i < chunk; ++i) {
ptr = ptr == null ? null : (BufferChunk) ptr.next;
}
return ptr;
}
public void clear() {
head = null;
tail = null;
}
}
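// Illustrative usage sketch (not part of the original file): chunks are appended in order
// and fetched by position; get(n) returns null when the list holds fewer than n + 1
// elements. The offsets below are hypothetical.
class BufferChunkListExample {
static BufferChunk secondChunk() {
BufferChunkList list = new BufferChunkList();
list.add(new BufferChunk(0L, 100));
list.add(new BufferChunk(100L, 100));
BufferChunk second = list.get(1);  // the chunk starting at offset 100
list.clear();                      // drops both head and tail references
return second;
}
}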
orc-main/java/core/src/java/org/apache/orc/impl/ColumnStatisticsImpl.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.orc.BinaryColumnStatistics;
import org.apache.orc.BooleanColumnStatistics;
import org.apache.orc.CollectionColumnStatistics;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.DateColumnStatistics;
import org.apache.orc.DecimalColumnStatistics;
import org.apache.orc.DoubleColumnStatistics;
import org.apache.orc.IntegerColumnStatistics;
import org.apache.orc.OrcProto;
import org.apache.orc.StringColumnStatistics;
import org.apache.orc.TimestampColumnStatistics;
import org.apache.orc.TypeDescription;
import org.threeten.extra.chrono.HybridChronology;
import java.sql.Date;
import java.sql.Timestamp;
import java.time.chrono.ChronoLocalDate;
import java.time.chrono.Chronology;
import java.time.chrono.IsoChronology;
import java.util.TimeZone;
public class ColumnStatisticsImpl implements ColumnStatistics {
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ColumnStatisticsImpl)) {
return false;
}
ColumnStatisticsImpl that = (ColumnStatisticsImpl) o;
if (count != that.count) {
return false;
}
if (hasNull != that.hasNull) {
return false;
}
return bytesOnDisk == that.bytesOnDisk;
}
@Override
public int hashCode() {
int result = (int) (count ^ (count >>> 32));
result = 31 * result + (hasNull ? 1 : 0);
return result;
}
private static final class BooleanStatisticsImpl extends ColumnStatisticsImpl
implements BooleanColumnStatistics {
private long trueCount = 0;
BooleanStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.BucketStatistics bkt = stats.getBucketStatistics();
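// The true count is serialized as the first (and only) bucket count; see serialize().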
trueCount = bkt.getCount(0);
}
BooleanStatisticsImpl() {
}
@Override
public void reset() {
super.reset();
trueCount = 0;
}
@Override
public void updateBoolean(boolean value, int repetitions) {
if (value) {
trueCount += repetitions;
}
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof BooleanStatisticsImpl) {
BooleanStatisticsImpl bkt = (BooleanStatisticsImpl) other;
trueCount += bkt.trueCount;
} else {
if (isStatsExists() && trueCount != 0) {
throw new IllegalArgumentException("Incompatible merging of boolean column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder builder = super.serialize();
OrcProto.BucketStatistics.Builder bucket =
OrcProto.BucketStatistics.newBuilder();
bucket.addCount(trueCount);
builder.setBucketStatistics(bucket);
return builder;
}
@Override
public long getFalseCount() {
return getNumberOfValues() - trueCount;
}
@Override
public long getTrueCount() {
return trueCount;
}
@Override
public String toString() {
return super.toString() + " true: " + trueCount;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BooleanStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
BooleanStatisticsImpl that = (BooleanStatisticsImpl) o;
return trueCount == that.trueCount;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (trueCount ^ (trueCount >>> 32));
return result;
}
}
/**
* Column statistics for List and Map types.
*/
private static final class CollectionColumnStatisticsImpl extends ColumnStatisticsImpl
implements CollectionColumnStatistics {
protected long minimum = Long.MAX_VALUE;
protected long maximum = 0;
protected long sum = 0;
CollectionColumnStatisticsImpl() {
super();
}
CollectionColumnStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.CollectionStatistics collStat = stats.getCollectionStatistics();
minimum = collStat.hasMinChildren() ? collStat.getMinChildren() : Long.MAX_VALUE;
maximum = collStat.hasMaxChildren() ? collStat.getMaxChildren() : 0;
sum = collStat.hasTotalChildren() ? collStat.getTotalChildren() : 0;
}
@Override
public void updateCollectionLength(final long length) {
/*
* Here, minimum = minCollectionLength
* maximum = maxCollectionLength
* sum = childCount
*/
if (length < minimum) {
minimum = length;
}
if (length > maximum) {
maximum = length;
}
this.sum += length;
}
@Override
public void reset() {
super.reset();
minimum = Long.MAX_VALUE;
maximum = 0;
sum = 0;
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof CollectionColumnStatisticsImpl) {
CollectionColumnStatisticsImpl otherColl = (CollectionColumnStatisticsImpl) other;
if(count == 0) {
minimum = otherColl.minimum;
maximum = otherColl.maximum;
} else {
if (otherColl.minimum < minimum) {
minimum = otherColl.minimum;
}
if (otherColl.maximum > maximum) {
maximum = otherColl.maximum;
}
}
sum += otherColl.sum;
} else {
if (isStatsExists()) {
throw new IllegalArgumentException("Incompatible merging of collection column statistics");
}
}
super.merge(other);
}
@Override
public long getMinimumChildren() {
return minimum;
}
@Override
public long getMaximumChildren() {
return maximum;
}
@Override
public long getTotalChildren() {
return sum;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (count != 0) {
buf.append(" minChildren: ");
buf.append(minimum);
buf.append(" maxChildren: ");
buf.append(maximum);
if (sum != 0) {
buf.append(" totalChildren: ");
buf.append(sum);
}
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof CollectionColumnStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
CollectionColumnStatisticsImpl that = (CollectionColumnStatisticsImpl) o;
if (minimum != that.minimum) {
return false;
}
if (maximum != that.maximum) {
return false;
}
return sum == that.sum;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (count != 0 ? (int) (minimum ^ (minimum >>> 32)): 0) ;
result = 31 * result + (count != 0 ? (int) (maximum ^ (maximum >>> 32)): 0);
result = 31 * result + (sum != 0 ? (int) (sum ^ (sum >>> 32)): 0);
return result;
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder builder = super.serialize();
OrcProto.CollectionStatistics.Builder collectionStats =
OrcProto.CollectionStatistics.newBuilder();
if (count != 0) {
collectionStats.setMinChildren(minimum);
collectionStats.setMaxChildren(maximum);
}
if (sum != 0) {
collectionStats.setTotalChildren(sum);
}
builder.setCollectionStatistics(collectionStats);
return builder;
}
}
/**
* Implementation of IntegerColumnStatistics
*/
private static final class IntegerStatisticsImpl extends ColumnStatisticsImpl
implements IntegerColumnStatistics {
private long minimum = Long.MAX_VALUE;
private long maximum = Long.MIN_VALUE;
private long sum = 0;
private boolean hasMinimum = false;
private boolean overflow = false;
IntegerStatisticsImpl() {
}
IntegerStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.IntegerStatistics intStat = stats.getIntStatistics();
if (intStat.hasMinimum()) {
hasMinimum = true;
minimum = intStat.getMinimum();
}
if (intStat.hasMaximum()) {
maximum = intStat.getMaximum();
}
if (intStat.hasSum()) {
sum = intStat.getSum();
} else {
overflow = true;
}
}
@Override
public void reset() {
super.reset();
hasMinimum = false;
minimum = Long.MAX_VALUE;
maximum = Long.MIN_VALUE;
sum = 0;
overflow = false;
}
@Override
public void updateInteger(long value, int repetitions) {
if (!hasMinimum) {
hasMinimum = true;
minimum = value;
maximum = value;
} else if (value < minimum) {
minimum = value;
} else if (value > maximum) {
maximum = value;
}
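// Accumulate the sum only while it still fits in a long: Math.multiplyExact and
// Math.addExact throw ArithmeticException on overflow, after which the sum is
// omitted from the serialized statistics (see serialize()).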
if (!overflow) {
try {
long increment = repetitions > 1
? Math.multiplyExact(value, repetitions)
: value;
sum = Math.addExact(sum, increment);
} catch (ArithmeticException e) {
overflow = true;
}
}
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof IntegerStatisticsImpl) {
IntegerStatisticsImpl otherInt = (IntegerStatisticsImpl) other;
if (!hasMinimum) {
hasMinimum = otherInt.hasMinimum;
minimum = otherInt.minimum;
maximum = otherInt.maximum;
} else if (otherInt.hasMinimum) {
if (otherInt.minimum < minimum) {
minimum = otherInt.minimum;
}
if (otherInt.maximum > maximum) {
maximum = otherInt.maximum;
}
}
overflow |= otherInt.overflow;
if (!overflow) {
try {
sum = Math.addExact(sum, otherInt.sum);
} catch (ArithmeticException e) {
overflow = true;
}
}
} else {
if (isStatsExists() && hasMinimum) {
throw new IllegalArgumentException("Incompatible merging of integer column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder builder = super.serialize();
OrcProto.IntegerStatistics.Builder intb =
OrcProto.IntegerStatistics.newBuilder();
if (hasMinimum) {
intb.setMinimum(minimum);
intb.setMaximum(maximum);
}
if (!overflow) {
intb.setSum(sum);
}
builder.setIntStatistics(intb);
return builder;
}
@Override
public long getMinimum() {
return minimum;
}
@Override
public long getMaximum() {
return maximum;
}
@Override
public boolean isSumDefined() {
return !overflow;
}
@Override
public long getSum() {
return sum;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (hasMinimum) {
buf.append(" min: ");
buf.append(minimum);
buf.append(" max: ");
buf.append(maximum);
}
if (!overflow) {
buf.append(" sum: ");
buf.append(sum);
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof IntegerStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
IntegerStatisticsImpl that = (IntegerStatisticsImpl) o;
if (minimum != that.minimum) {
return false;
}
if (maximum != that.maximum) {
return false;
}
if (sum != that.sum) {
return false;
}
if (hasMinimum != that.hasMinimum) {
return false;
}
return overflow == that.overflow;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (minimum ^ (minimum >>> 32));
result = 31 * result + (int) (maximum ^ (maximum >>> 32));
result = 31 * result + (int) (sum ^ (sum >>> 32));
result = 31 * result + (hasMinimum ? 1 : 0);
result = 31 * result + (overflow ? 1 : 0);
return result;
}
}
private static final class DoubleStatisticsImpl extends ColumnStatisticsImpl
implements DoubleColumnStatistics {
private boolean hasMinimum = false;
private double minimum = Double.MAX_VALUE;
private double maximum = Double.MIN_VALUE;
private double sum = 0;
DoubleStatisticsImpl() {
}
DoubleStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.DoubleStatistics dbl = stats.getDoubleStatistics();
if (dbl.hasMinimum()) {
hasMinimum = true;
minimum = dbl.getMinimum();
}
if (dbl.hasMaximum()) {
maximum = dbl.getMaximum();
}
if (dbl.hasSum()) {
sum = dbl.getSum();
}
}
@Override
public void reset() {
super.reset();
hasMinimum = false;
minimum = Double.MAX_VALUE;
maximum = Double.MIN_VALUE;
sum = 0;
}
@Override
public void updateDouble(double value) {
if (!hasMinimum) {
hasMinimum = true;
minimum = value;
maximum = value;
} else if (value < minimum) {
minimum = value;
} else if (value > maximum) {
maximum = value;
}
sum += value;
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof DoubleStatisticsImpl) {
DoubleStatisticsImpl dbl = (DoubleStatisticsImpl) other;
if (!hasMinimum) {
hasMinimum = dbl.hasMinimum;
minimum = dbl.minimum;
maximum = dbl.maximum;
} else if (dbl.hasMinimum) {
if (dbl.minimum < minimum) {
minimum = dbl.minimum;
}
if (dbl.maximum > maximum) {
maximum = dbl.maximum;
}
}
sum += dbl.sum;
} else {
if (isStatsExists() && hasMinimum) {
throw new IllegalArgumentException("Incompatible merging of double column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder builder = super.serialize();
OrcProto.DoubleStatistics.Builder dbl =
OrcProto.DoubleStatistics.newBuilder();
if (hasMinimum) {
dbl.setMinimum(minimum);
dbl.setMaximum(maximum);
}
dbl.setSum(sum);
builder.setDoubleStatistics(dbl);
return builder;
}
@Override
public double getMinimum() {
return minimum;
}
@Override
public double getMaximum() {
return maximum;
}
@Override
public double getSum() {
return sum;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (hasMinimum) {
buf.append(" min: ");
buf.append(minimum);
buf.append(" max: ");
buf.append(maximum);
}
buf.append(" sum: ");
buf.append(sum);
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DoubleStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
DoubleStatisticsImpl that = (DoubleStatisticsImpl) o;
if (hasMinimum != that.hasMinimum) {
return false;
}
if (Double.compare(that.minimum, minimum) != 0) {
return false;
}
if (Double.compare(that.maximum, maximum) != 0) {
return false;
}
return Double.compare(that.sum, sum) == 0;
}
@Override
public int hashCode() {
int result = super.hashCode();
long temp;
result = 31 * result + (hasMinimum ? 1 : 0);
temp = Double.doubleToLongBits(minimum);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(maximum);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(sum);
result = 31 * result + (int) (temp ^ (temp >>> 32));
return result;
}
}
protected static final class StringStatisticsImpl extends ColumnStatisticsImpl
implements StringColumnStatistics {
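// Values longer than MAX_BYTES_RECORDED are not stored verbatim; a truncated lower
// bound and an incremented upper bound are recorded instead, and getMinimum()/
// getMaximum() return null when only a truncated bound is available.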
public static final int MAX_BYTES_RECORDED = 1024;
private Text minimum = null;
private Text maximum = null;
private long sum = 0;
private boolean isLowerBoundSet = false;
private boolean isUpperBoundSet = false;
StringStatisticsImpl() {
}
StringStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.StringStatistics str = stats.getStringStatistics();
if (str.hasMaximum()) {
maximum = new Text(str.getMaximum());
} else if (str.hasUpperBound()) {
maximum = new Text(str.getUpperBound());
isUpperBoundSet = true;
}
if (str.hasMinimum()) {
minimum = new Text(str.getMinimum());
} else if (str.hasLowerBound()) {
minimum = new Text(str.getLowerBound());
isLowerBoundSet = true;
}
if(str.hasSum()) {
sum = str.getSum();
}
}
@Override
public void reset() {
super.reset();
minimum = null;
maximum = null;
isLowerBoundSet = false;
isUpperBoundSet = false;
sum = 0;
}
@Override
public void updateString(Text value) {
updateString(value.getBytes(), 0, value.getLength(), 1);
}
@Override
public void updateString(byte[] bytes, int offset, int length,
int repetitions) {
if (minimum == null) {
if(length > MAX_BYTES_RECORDED) {
minimum = truncateLowerBound(bytes, offset);
maximum = truncateUpperBound(bytes, offset);
isLowerBoundSet = true;
isUpperBoundSet = true;
} else {
maximum = minimum = new Text();
maximum.set(bytes, offset, length);
isLowerBoundSet = false;
isUpperBoundSet = false;
}
} else if (WritableComparator.compareBytes(minimum.getBytes(), 0,
minimum.getLength(), bytes, offset, length) > 0) {
if(length > MAX_BYTES_RECORDED) {
minimum = truncateLowerBound(bytes, offset);
isLowerBoundSet = true;
} else {
minimum = new Text();
minimum.set(bytes, offset, length);
isLowerBoundSet = false;
}
} else if (WritableComparator.compareBytes(maximum.getBytes(), 0,
maximum.getLength(), bytes, offset, length) < 0) {
if(length > MAX_BYTES_RECORDED) {
maximum = truncateUpperBound(bytes, offset);
isUpperBoundSet = true;
} else {
maximum = new Text();
maximum.set(bytes, offset, length);
isUpperBoundSet = false;
}
}
sum += (long)length * repetitions;
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof StringStatisticsImpl) {
StringStatisticsImpl str = (StringStatisticsImpl) other;
if (count == 0) {
if (str.count != 0) {
minimum = new Text(str.minimum);
isLowerBoundSet = str.isLowerBoundSet;
maximum = new Text(str.maximum);
isUpperBoundSet = str.isUpperBoundSet;
} else {
/* both are empty */
maximum = minimum = null;
isLowerBoundSet = false;
isUpperBoundSet = false;
}
} else if (str.count != 0) {
if (minimum.compareTo(str.minimum) > 0) {
minimum = new Text(str.minimum);
isLowerBoundSet = str.isLowerBoundSet;
}
if (maximum.compareTo(str.maximum) < 0) {
maximum = new Text(str.maximum);
isUpperBoundSet = str.isUpperBoundSet;
}
}
sum += str.sum;
} else {
if (isStatsExists()) {
throw new IllegalArgumentException("Incompatible merging of string column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder result = super.serialize();
OrcProto.StringStatistics.Builder str =
OrcProto.StringStatistics.newBuilder();
if (getNumberOfValues() != 0) {
if (isLowerBoundSet) {
str.setLowerBound(minimum.toString());
} else {
str.setMinimum(minimum.toString());
}
if (isUpperBoundSet) {
str.setUpperBound(maximum.toString());
} else {
str.setMaximum(maximum.toString());
}
str.setSum(sum);
}
result.setStringStatistics(str);
return result;
}
@Override
public String getMinimum() {
/* if we have lower bound set (in case of truncation)
getMinimum will be null */
if(isLowerBoundSet) {
return null;
} else {
return minimum == null ? null : minimum.toString();
}
}
@Override
public String getMaximum() {
/* if we have upper bound set (in case of truncation)
getMaximum will be null */
if(isUpperBoundSet) {
return null;
} else {
return maximum == null ? null : maximum.toString();
}
}
/**
* Get the string with
* length = Min(StringStatisticsImpl.MAX_BYTES_RECORDED, getMinimum())
*
* @return lower bound
*/
@Override
public String getLowerBound() {
return minimum == null ? null : minimum.toString();
}
/**
* Get the string with
* length = Min(StringStatisticsImpl.MAX_BYTES_RECORDED, getMaximum())
*
* @return upper bound
*/
@Override
public String getUpperBound() {
return maximum == null ? null : maximum.toString();
}
@Override
public long getSum() {
return sum;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (minimum != null) {
if (isLowerBoundSet) {
buf.append(" lower: ");
} else {
buf.append(" min: ");
}
buf.append(getLowerBound());
if (isUpperBoundSet) {
buf.append(" upper: ");
} else {
buf.append(" max: ");
}
buf.append(getUpperBound());
buf.append(" sum: ");
buf.append(sum);
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof StringStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
StringStatisticsImpl that = (StringStatisticsImpl) o;
if (sum != that.sum) {
return false;
}
if (minimum != null ? !minimum.equals(that.minimum) : that.minimum != null) {
return false;
}
return maximum != null ? maximum.equals(that.maximum) : that.maximum == null;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (minimum != null ? minimum.hashCode() : 0);
result = 31 * result + (maximum != null ? maximum.hashCode() : 0);
result = 31 * result + (int) (sum ^ (sum >>> 32));
return result;
}
private static void appendCodePoint(Text result, int codepoint) {
if (codepoint < 0 || codepoint > 0x1f_ffff) {
throw new IllegalArgumentException("Codepoint out of range " +
codepoint);
}
byte[] buffer = new byte[4];
if (codepoint < 0x7f) {
buffer[0] = (byte) codepoint;
result.append(buffer, 0, 1);
} else if (codepoint <= 0x7ff) {
buffer[0] = (byte) (0xc0 | (codepoint >> 6));
buffer[1] = (byte) (0x80 | (codepoint & 0x3f));
result.append(buffer, 0 , 2);
} else if (codepoint < 0xffff) {
buffer[0] = (byte) (0xe0 | (codepoint >> 12));
buffer[1] = (byte) (0x80 | ((codepoint >> 6) & 0x3f));
buffer[2] = (byte) (0x80 | (codepoint & 0x3f));
result.append(buffer, 0, 3);
} else {
buffer[0] = (byte) (0xf0 | (codepoint >> 18));
buffer[1] = (byte) (0x80 | ((codepoint >> 12) & 0x3f));
buffer[2] = (byte) (0x80 | ((codepoint >> 6) & 0x3f));
buffer[3] = (byte) (0x80 | (codepoint & 0x3f));
result.append(buffer, 0, 4);
}
}
/**
* Create a text that is truncated to at most MAX_BYTES_RECORDED at a
* character boundary with the last code point incremented by 1.
* The length is assumed to be greater than MAX_BYTES_RECORDED.
* @param text the text to truncate
* @param from the index of the first character
* @return truncated Text value
*/
private static Text truncateUpperBound(final byte[] text, final int from) {
int followingChar = Utf8Utils.findLastCharacter(text, from,
from + MAX_BYTES_RECORDED);
int lastChar = Utf8Utils.findLastCharacter(text, from, followingChar - 1);
Text result = new Text();
result.set(text, from, lastChar - from);
appendCodePoint(result,
Utf8Utils.getCodePoint(text, lastChar, followingChar - lastChar) + 1);
return result;
}
/**
* Create a text that is truncated to at most MAX_BYTES_RECORDED at a
* character boundary.
* The length is assumed to be greater than MAX_BYTES_RECORDED.
* @param text Byte array to truncate
* @param from This is the index of the first character
* @return truncated {@link Text}
*/
private static Text truncateLowerBound(final byte[] text, final int from) {
int lastChar = Utf8Utils.findLastCharacter(text, from,
from + MAX_BYTES_RECORDED);
Text result = new Text();
result.set(text, from, lastChar - from);
return result;
}
}
protected static final class BinaryStatisticsImpl extends ColumnStatisticsImpl implements
BinaryColumnStatistics {
private long sum = 0;
BinaryStatisticsImpl() {
}
BinaryStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.BinaryStatistics binStats = stats.getBinaryStatistics();
if (binStats.hasSum()) {
sum = binStats.getSum();
}
}
@Override
public void reset() {
super.reset();
sum = 0;
}
@Override
public void updateBinary(BytesWritable value) {
sum += value.getLength();
}
@Override
public void updateBinary(byte[] bytes, int offset, int length,
int repetitions) {
sum += (long)length * repetitions;
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof BinaryColumnStatistics) {
BinaryStatisticsImpl bin = (BinaryStatisticsImpl) other;
sum += bin.sum;
} else {
if (isStatsExists() && sum != 0) {
throw new IllegalArgumentException("Incompatible merging of binary column statistics");
}
}
super.merge(other);
}
@Override
public long getSum() {
return sum;
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder result = super.serialize();
OrcProto.BinaryStatistics.Builder bin = OrcProto.BinaryStatistics.newBuilder();
bin.setSum(sum);
result.setBinaryStatistics(bin);
return result;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (getNumberOfValues() != 0) {
buf.append(" sum: ");
buf.append(sum);
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BinaryStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
BinaryStatisticsImpl that = (BinaryStatisticsImpl) o;
return sum == that.sum;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (sum ^ (sum >>> 32));
return result;
}
}
private static final class DecimalStatisticsImpl extends ColumnStatisticsImpl
implements DecimalColumnStatistics {
// These objects are mutable for better performance.
private HiveDecimalWritable minimum = null;
private HiveDecimalWritable maximum = null;
private HiveDecimalWritable sum = new HiveDecimalWritable(0);
DecimalStatisticsImpl() {
}
DecimalStatisticsImpl(OrcProto.ColumnStatistics stats) {
super(stats);
OrcProto.DecimalStatistics dec = stats.getDecimalStatistics();
if (dec.hasMaximum()) {
maximum = new HiveDecimalWritable(dec.getMaximum());
}
if (dec.hasMinimum()) {
minimum = new HiveDecimalWritable(dec.getMinimum());
}
if (dec.hasSum()) {
sum = new HiveDecimalWritable(dec.getSum());
} else {
sum = null;
}
}
@Override
public void reset() {
super.reset();
minimum = null;
maximum = null;
sum = new HiveDecimalWritable(0);
}
@Override
public void updateDecimal(HiveDecimalWritable value) {
if (minimum == null) {
minimum = new HiveDecimalWritable(value);
maximum = new HiveDecimalWritable(value);
} else if (minimum.compareTo(value) > 0) {
minimum.set(value);
} else if (maximum.compareTo(value) < 0) {
maximum.set(value);
}
if (sum != null) {
sum.mutateAdd(value);
}
}
@Override
public void updateDecimal64(long value, int scale) {
HiveDecimalWritable dValue = new HiveDecimalWritable();
dValue.setFromLongAndScale(value, scale);
updateDecimal(dValue);
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof DecimalStatisticsImpl) {
DecimalStatisticsImpl dec = (DecimalStatisticsImpl) other;
if (minimum == null) {
minimum = (dec.minimum != null ? new HiveDecimalWritable(dec.minimum) : null);
maximum = (dec.maximum != null ? new HiveDecimalWritable(dec.maximum) : null);
sum = dec.sum;
} else if (dec.minimum != null) {
if (minimum.compareTo(dec.minimum) > 0) {
minimum.set(dec.minimum);
}
if (maximum.compareTo(dec.maximum) < 0) {
maximum.set(dec.maximum);
}
if (sum == null || dec.sum == null) {
sum = null;
} else {
sum.mutateAdd(dec.sum);
}
}
} else {
if (isStatsExists() && minimum != null) {
throw new IllegalArgumentException("Incompatible merging of decimal column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder result = super.serialize();
OrcProto.DecimalStatistics.Builder dec =
OrcProto.DecimalStatistics.newBuilder();
if (getNumberOfValues() != 0 && minimum != null) {
dec.setMinimum(minimum.toString());
dec.setMaximum(maximum.toString());
}
// Check isSet for overflow.
if (sum != null && sum.isSet()) {
dec.setSum(sum.toString());
}
result.setDecimalStatistics(dec);
return result;
}
@Override
public HiveDecimal getMinimum() {
return minimum == null ? null : minimum.getHiveDecimal();
}
@Override
public HiveDecimal getMaximum() {
return maximum == null ? null : maximum.getHiveDecimal();
}
@Override
public HiveDecimal getSum() {
return sum == null ? null : sum.getHiveDecimal();
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (getNumberOfValues() != 0) {
buf.append(" min: ");
buf.append(minimum);
buf.append(" max: ");
buf.append(maximum);
if (sum != null) {
buf.append(" sum: ");
buf.append(sum);
}
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DecimalStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
DecimalStatisticsImpl that = (DecimalStatisticsImpl) o;
if (minimum != null ? !minimum.equals(that.minimum) : that.minimum != null) {
return false;
}
if (maximum != null ? !maximum.equals(that.maximum) : that.maximum != null) {
return false;
}
return sum != null ? sum.equals(that.sum) : that.sum == null;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (minimum != null ? minimum.hashCode() : 0);
result = 31 * result + (maximum != null ? maximum.hashCode() : 0);
result = 31 * result + (sum != null ? sum.hashCode() : 0);
return result;
}
}
private static final class Decimal64StatisticsImpl extends ColumnStatisticsImpl
implements DecimalColumnStatistics {
private final int scale;
private long minimum = Long.MAX_VALUE;
private long maximum = Long.MIN_VALUE;
private boolean hasSum = true;
private long sum = 0;
private final HiveDecimalWritable scratch = new HiveDecimalWritable();
Decimal64StatisticsImpl(int scale) {
this.scale = scale;
}
Decimal64StatisticsImpl(int scale, OrcProto.ColumnStatistics stats) {
super(stats);
this.scale = scale;
OrcProto.DecimalStatistics dec = stats.getDecimalStatistics();
if (dec.hasMaximum()) {
maximum = new HiveDecimalWritable(dec.getMaximum()).serialize64(scale);
} else {
maximum = Long.MIN_VALUE;
}
if (dec.hasMinimum()) {
minimum = new HiveDecimalWritable(dec.getMinimum()).serialize64(scale);
} else {
minimum = Long.MAX_VALUE;
}
if (dec.hasSum()) {
hasSum = true;
HiveDecimalWritable sumTmp = new HiveDecimalWritable(dec.getSum());
if (sumTmp.getHiveDecimal().integerDigitCount() + scale <=
TypeDescription.MAX_DECIMAL64_PRECISION) {
hasSum = true;
sum = sumTmp.serialize64(scale);
return;
}
}
hasSum = false;
}
@Override
public void reset() {
super.reset();
minimum = Long.MAX_VALUE;
maximum = Long.MIN_VALUE;
hasSum = true;
sum = 0;
}
@Override
public void updateDecimal(HiveDecimalWritable value) {
updateDecimal64(value.serialize64(scale), scale);
}
@Override
public void updateDecimal64(long value, int valueScale) {
// normalize the scale to our desired level
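// e.g. with scale = 2, a value of 12345 at valueScale 4 becomes 123 (truncating),
// while a value of 123 at valueScale 1 becomes 1230.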
while (valueScale != scale) {
if (valueScale > scale) {
value /= 10;
valueScale -= 1;
} else {
value *= 10;
valueScale += 1;
}
}
if (value < TypeDescription.MIN_DECIMAL64 ||
value > TypeDescription.MAX_DECIMAL64) {
throw new IllegalArgumentException("Out of bounds decimal64 " + value);
}
if (minimum > value) {
minimum = value;
}
if (maximum < value) {
maximum = value;
}
if (hasSum) {
sum += value;
hasSum = sum <= TypeDescription.MAX_DECIMAL64 &&
sum >= TypeDescription.MIN_DECIMAL64;
}
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof Decimal64StatisticsImpl) {
Decimal64StatisticsImpl dec = (Decimal64StatisticsImpl) other;
if (getNumberOfValues() == 0) {
minimum = dec.minimum;
maximum = dec.maximum;
sum = dec.sum;
} else {
if (minimum > dec.minimum) {
minimum = dec.minimum;
}
if (maximum < dec.maximum) {
maximum = dec.maximum;
}
if (hasSum && dec.hasSum) {
sum += dec.sum;
hasSum = sum <= TypeDescription.MAX_DECIMAL64 &&
sum >= TypeDescription.MIN_DECIMAL64;
} else {
hasSum = false;
}
}
} else {
if (other.getNumberOfValues() != 0) {
throw new IllegalArgumentException("Incompatible merging of decimal column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder result = super.serialize();
OrcProto.DecimalStatistics.Builder dec =
OrcProto.DecimalStatistics.newBuilder();
if (getNumberOfValues() != 0) {
scratch.setFromLongAndScale(minimum, scale);
dec.setMinimum(scratch.toString());
scratch.setFromLongAndScale(maximum, scale);
dec.setMaximum(scratch.toString());
}
// Check hasSum for overflow.
if (hasSum) {
scratch.setFromLongAndScale(sum, scale);
dec.setSum(scratch.toString());
}
result.setDecimalStatistics(dec);
return result;
}
@Override
public HiveDecimal getMinimum() {
if (getNumberOfValues() > 0) {
scratch.setFromLongAndScale(minimum, scale);
return scratch.getHiveDecimal();
}
return null;
}
@Override
public HiveDecimal getMaximum() {
if (getNumberOfValues() > 0) {
scratch.setFromLongAndScale(maximum, scale);
return scratch.getHiveDecimal();
}
return null;
}
@Override
public HiveDecimal getSum() {
if (hasSum) {
scratch.setFromLongAndScale(sum, scale);
return scratch.getHiveDecimal();
}
return null;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (getNumberOfValues() != 0) {
buf.append(" min: ");
buf.append(getMinimum());
buf.append(" max: ");
buf.append(getMaximum());
if (hasSum) {
buf.append(" sum: ");
buf.append(getSum());
}
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Decimal64StatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
Decimal64StatisticsImpl that = (Decimal64StatisticsImpl) o;
if (minimum != that.minimum ||
maximum != that.maximum ||
hasSum != that.hasSum) {
return false;
}
return !hasSum || (sum == that.sum);
}
@Override
public int hashCode() {
int result = super.hashCode();
boolean hasValues = getNumberOfValues() > 0;
result = 31 * result + (hasValues ? (int) minimum : 0);
result = 31 * result + (hasValues ? (int) maximum : 0);
result = 31 * result + (hasSum ? (int) sum : 0);
return result;
}
}
private static final class DateStatisticsImpl extends ColumnStatisticsImpl
implements DateColumnStatistics {
private int minimum = Integer.MAX_VALUE;
private int maximum = Integer.MIN_VALUE;
private final Chronology chronology;
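// HybridChronology models the historical Julian/Gregorian cutover, while
// IsoChronology is the proleptic Gregorian calendar.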
static Chronology getInstance(boolean proleptic) {
return proleptic ? IsoChronology.INSTANCE : HybridChronology.INSTANCE;
}
DateStatisticsImpl(boolean convertToProleptic) {
this.chronology = getInstance(convertToProleptic);
}
DateStatisticsImpl(OrcProto.ColumnStatistics stats,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
super(stats);
this.chronology = getInstance(convertToProlepticGregorian);
OrcProto.DateStatistics dateStats = stats.getDateStatistics();
// min,max values serialized/deserialized as int (days since epoch)
if (dateStats.hasMaximum()) {
maximum = DateUtils.convertDate(dateStats.getMaximum(),
writerUsedProlepticGregorian, convertToProlepticGregorian);
}
if (dateStats.hasMinimum()) {
minimum = DateUtils.convertDate(dateStats.getMinimum(),
writerUsedProlepticGregorian, convertToProlepticGregorian);
}
}
@Override
public void reset() {
super.reset();
minimum = Integer.MAX_VALUE;
maximum = Integer.MIN_VALUE;
}
@Override
public void updateDate(DateWritable value) {
if (minimum > value.getDays()) {
minimum = value.getDays();
}
if (maximum < value.getDays()) {
maximum = value.getDays();
}
}
@Override
public void updateDate(int value) {
if (minimum > value) {
minimum = value;
}
if (maximum < value) {
maximum = value;
}
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof DateStatisticsImpl) {
DateStatisticsImpl dateStats = (DateStatisticsImpl) other;
minimum = Math.min(minimum, dateStats.minimum);
maximum = Math.max(maximum, dateStats.maximum);
} else {
if (isStatsExists() && count != 0) {
throw new IllegalArgumentException("Incompatible merging of date column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder result = super.serialize();
OrcProto.DateStatistics.Builder dateStats =
OrcProto.DateStatistics.newBuilder();
if (count != 0) {
dateStats.setMinimum(minimum);
dateStats.setMaximum(maximum);
}
result.setDateStatistics(dateStats);
return result;
}
@Override
public ChronoLocalDate getMinimumLocalDate() {
return count == 0 ? null : chronology.dateEpochDay(minimum);
}
@Override
public long getMinimumDayOfEpoch() {
return minimum;
}
@Override
public ChronoLocalDate getMaximumLocalDate() {
return count == 0 ? null : chronology.dateEpochDay(maximum);
}
@Override
public long getMaximumDayOfEpoch() {
return maximum;
}
@Override
public Date getMinimum() {
if (count == 0) {
return null;
}
DateWritable minDate = new DateWritable(minimum);
return minDate.get();
}
@Override
public Date getMaximum() {
if (count == 0) {
return null;
}
DateWritable maxDate = new DateWritable(maximum);
return maxDate.get();
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (getNumberOfValues() != 0) {
buf.append(" min: ");
buf.append(getMinimumLocalDate());
buf.append(" max: ");
buf.append(getMaximumLocalDate());
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DateStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
DateStatisticsImpl that = (DateStatisticsImpl) o;
if (minimum != that.minimum) {
return false;
}
return maximum == that.maximum;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + minimum;
result = 31 * result + maximum;
return result;
}
}
private static class TimestampStatisticsImpl extends ColumnStatisticsImpl
implements TimestampColumnStatistics {
private static final int DEFAULT_MIN_NANOS = 000_000;
private static final int DEFAULT_MAX_NANOS = 999_999;
private long minimum = Long.MAX_VALUE;
private long maximum = Long.MIN_VALUE;
private int minNanos = DEFAULT_MIN_NANOS;
private int maxNanos = DEFAULT_MAX_NANOS;
TimestampStatisticsImpl() {
}
TimestampStatisticsImpl(OrcProto.ColumnStatistics stats,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
super(stats);
OrcProto.TimestampStatistics timestampStats = stats.getTimestampStatistics();
// min,max values are serialized/deserialized as milliseconds since epoch
if (timestampStats.hasMaximum()) {
maximum = DateUtils.convertTime(
SerializationUtils.convertToUtc(TimeZone.getDefault(),
timestampStats.getMaximum()),
writerUsedProlepticGregorian, convertToProlepticGregorian, true);
}
if (timestampStats.hasMinimum()) {
minimum = DateUtils.convertTime(
SerializationUtils.convertToUtc(TimeZone.getDefault(),
timestampStats.getMinimum()),
writerUsedProlepticGregorian, convertToProlepticGregorian, true);
}
if (timestampStats.hasMaximumUtc()) {
maximum = DateUtils.convertTime(timestampStats.getMaximumUtc(),
writerUsedProlepticGregorian, convertToProlepticGregorian, true);
}
if (timestampStats.hasMinimumUtc()) {
minimum = DateUtils.convertTime(timestampStats.getMinimumUtc(),
writerUsedProlepticGregorian, convertToProlepticGregorian, true);
}
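// The nanosecond fields are serialized with an offset of +1 (see serialize()),
// so subtract 1 here to recover the sub-millisecond nanos.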
if (timestampStats.hasMaximumNanos()) {
maxNanos = timestampStats.getMaximumNanos() - 1;
}
if (timestampStats.hasMinimumNanos()) {
minNanos = timestampStats.getMinimumNanos() - 1;
}
}
@Override
public void reset() {
super.reset();
minimum = Long.MAX_VALUE;
maximum = Long.MIN_VALUE;
minNanos = DEFAULT_MIN_NANOS;
maxNanos = DEFAULT_MAX_NANOS;
}
@Override
public void updateTimestamp(Timestamp value) {
long millis = SerializationUtils.convertToUtc(TimeZone.getDefault(),
value.getTime());
// keep only the sub-millisecond nanos; the milliseconds above already carry the rest
updateTimestamp(millis, value.getNanos() % 1_000_000);
}
@Override
public void updateTimestamp(long value, int nanos) {
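// minimum > maximum is the reset state (see reset()), i.e. no values recorded yet.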
if (minimum > maximum) {
minimum = value;
maximum = value;
minNanos = nanos;
maxNanos = nanos;
} else {
if (minimum >= value) {
if (minimum > value || nanos < minNanos) {
minNanos = nanos;
}
minimum = value;
}
if (maximum <= value) {
if (maximum < value || nanos > maxNanos) {
maxNanos = nanos;
}
maximum = value;
}
}
}
@Override
public void merge(ColumnStatisticsImpl other) {
if (other instanceof TimestampStatisticsImpl) {
TimestampStatisticsImpl timestampStats = (TimestampStatisticsImpl) other;
if (count == 0) {
if (timestampStats.count != 0) {
minimum = timestampStats.minimum;
maximum = timestampStats.maximum;
minNanos = timestampStats.minNanos;
maxNanos = timestampStats.maxNanos;
}
} else if (timestampStats.count != 0) {
if (minimum >= timestampStats.minimum) {
if (minimum > timestampStats.minimum ||
minNanos > timestampStats.minNanos) {
minNanos = timestampStats.minNanos;
}
minimum = timestampStats.minimum;
}
if (maximum <= timestampStats.maximum) {
if (maximum < timestampStats.maximum ||
maxNanos < timestampStats.maxNanos) {
maxNanos = timestampStats.maxNanos;
}
maximum = timestampStats.maximum;
}
}
} else {
if (isStatsExists() && count != 0) {
throw new IllegalArgumentException("Incompatible merging of timestamp column statistics");
}
}
super.merge(other);
}
@Override
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder result = super.serialize();
OrcProto.TimestampStatistics.Builder timestampStats = OrcProto.TimestampStatistics
.newBuilder();
if (getNumberOfValues() != 0) {
timestampStats.setMinimumUtc(minimum);
timestampStats.setMaximumUtc(maximum);
if (minNanos != DEFAULT_MIN_NANOS) {
timestampStats.setMinimumNanos(minNanos + 1);
}
if (maxNanos != DEFAULT_MAX_NANOS) {
timestampStats.setMaximumNanos(maxNanos + 1);
}
}
result.setTimestampStatistics(timestampStats);
return result;
}
@Override
public Timestamp getMinimum() {
if (minimum > maximum) {
return null;
} else {
Timestamp ts = new Timestamp(SerializationUtils.
convertFromUtc(TimeZone.getDefault(), minimum));
ts.setNanos(ts.getNanos() + minNanos);
return ts;
}
}
@Override
public Timestamp getMaximum() {
if (minimum > maximum) {
return null;
} else {
Timestamp ts = new Timestamp(SerializationUtils.convertFromUtc(
TimeZone.getDefault(), maximum));
ts.setNanos(ts.getNanos() + maxNanos);
return ts;
}
}
@Override
public Timestamp getMinimumUTC() {
if (minimum > maximum) {
return null;
} else {
Timestamp ts = new Timestamp(minimum);
ts.setNanos(ts.getNanos() + minNanos);
return ts;
}
}
@Override
public Timestamp getMaximumUTC() {
if (minimum > maximum) {
return null;
} else {
Timestamp ts = new Timestamp(maximum);
ts.setNanos(ts.getNanos() + maxNanos);
return ts;
}
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(super.toString());
if (minimum <= maximum) {
buf.append(" min: ");
buf.append(getMinimum());
buf.append(" max: ");
buf.append(getMaximum());
}
return buf.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof TimestampStatisticsImpl)) {
return false;
}
if (!super.equals(o)) {
return false;
}
TimestampStatisticsImpl that = (TimestampStatisticsImpl) o;
return minimum == that.minimum && maximum == that.maximum &&
minNanos == that.minNanos && maxNanos == that.maxNanos;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + (int) (maximum ^ (maximum >>> 32));
result = prime * result + (int) (minimum ^ (minimum >>> 32));
return result;
}
}
private static final class TimestampInstantStatisticsImpl extends TimestampStatisticsImpl {
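// Unlike TimestampStatisticsImpl, instants are already in UTC, so no local time-zone
// conversion is applied on update and the UTC minimum/maximum are returned directly.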
TimestampInstantStatisticsImpl() {
}
TimestampInstantStatisticsImpl(OrcProto.ColumnStatistics stats,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
super(stats, writerUsedProlepticGregorian, convertToProlepticGregorian);
}
@Override
public void updateTimestamp(Timestamp value) {
updateTimestamp(value.getTime(), value.getNanos() % 1_000_000);
}
@Override
public Timestamp getMinimum() {
return getMinimumUTC();
}
@Override
public Timestamp getMaximum() {
return getMaximumUTC();
}
}
protected long count = 0;
private boolean hasNull = false;
private long bytesOnDisk = 0;
ColumnStatisticsImpl(OrcProto.ColumnStatistics stats) {
if (stats.hasNumberOfValues()) {
count = stats.getNumberOfValues();
}
bytesOnDisk = stats.hasBytesOnDisk() ? stats.getBytesOnDisk() : 0;
if (stats.hasHasNull()) {
hasNull = stats.getHasNull();
} else {
hasNull = true;
}
}
ColumnStatisticsImpl() {
}
public void increment() {
count += 1;
}
public void increment(int count) {
this.count += count;
}
public void updateByteCount(long size) {
this.bytesOnDisk += size;
}
public void setNull() {
hasNull = true;
}
/**
* Update the collection length for Map and List type.
* @param value length of collection
*/
public void updateCollectionLength(final long value) {
throw new UnsupportedOperationException(
"Can't update collection count");
}
public void updateBoolean(boolean value, int repetitions) {
throw new UnsupportedOperationException("Can't update boolean");
}
public void updateInteger(long value, int repetitions) {
throw new UnsupportedOperationException("Can't update integer");
}
public void updateDouble(double value) {
throw new UnsupportedOperationException("Can't update double");
}
public void updateString(Text value) {
throw new UnsupportedOperationException("Can't update string");
}
public void updateString(byte[] bytes, int offset, int length,
int repetitions) {
throw new UnsupportedOperationException("Can't update string");
}
public void updateBinary(BytesWritable value) {
throw new UnsupportedOperationException("Can't update binary");
}
public void updateBinary(byte[] bytes, int offset, int length,
int repetitions) {
throw new UnsupportedOperationException("Can't update string");
}
public void updateDecimal(HiveDecimalWritable value) {
throw new UnsupportedOperationException("Can't update decimal");
}
public void updateDecimal64(long value, int scale) {
throw new UnsupportedOperationException("Can't update decimal");
}
public void updateDate(DateWritable value) {
throw new UnsupportedOperationException("Can't update date");
}
public void updateDate(int value) {
throw new UnsupportedOperationException("Can't update date");
}
public void updateTimestamp(Timestamp value) {
throw new UnsupportedOperationException("Can't update timestamp");
}
// has to be extended
public void updateTimestamp(long value, int nanos) {
throw new UnsupportedOperationException("Can't update timestamp");
}
public boolean isStatsExists() {
return count > 0 || hasNull;
}
public void merge(ColumnStatisticsImpl stats) {
count += stats.count;
hasNull |= stats.hasNull;
bytesOnDisk += stats.bytesOnDisk;
}
public void reset() {
count = 0;
bytesOnDisk = 0;
hasNull = false;
}
@Override
public long getNumberOfValues() {
return count;
}
@Override
public boolean hasNull() {
return hasNull;
}
/**
* Get the number of bytes for this column.
*
* @return the number of bytes
*/
@Override
public long getBytesOnDisk() {
return bytesOnDisk;
}
@Override
public String toString() {
return "count: " + count + " hasNull: " + hasNull +
(bytesOnDisk != 0 ? " bytesOnDisk: " + bytesOnDisk : "");
}
public OrcProto.ColumnStatistics.Builder serialize() {
OrcProto.ColumnStatistics.Builder builder =
OrcProto.ColumnStatistics.newBuilder();
builder.setNumberOfValues(count);
builder.setHasNull(hasNull);
if (bytesOnDisk != 0) {
builder.setBytesOnDisk(bytesOnDisk);
}
return builder;
}
public static ColumnStatisticsImpl create(TypeDescription schema) {
return create(schema, false);
}
public static ColumnStatisticsImpl create(TypeDescription schema,
boolean convertToProleptic) {
switch (schema.getCategory()) {
case BOOLEAN:
return new BooleanStatisticsImpl();
case BYTE:
case SHORT:
case INT:
case LONG:
return new IntegerStatisticsImpl();
case LIST:
case MAP:
return new CollectionColumnStatisticsImpl();
case FLOAT:
case DOUBLE:
return new DoubleStatisticsImpl();
case STRING:
case CHAR:
case VARCHAR:
return new StringStatisticsImpl();
case DECIMAL:
if (schema.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
return new Decimal64StatisticsImpl(schema.getScale());
} else {
return new DecimalStatisticsImpl();
}
case DATE:
return new DateStatisticsImpl(convertToProleptic);
case TIMESTAMP:
return new TimestampStatisticsImpl();
case TIMESTAMP_INSTANT:
return new TimestampInstantStatisticsImpl();
case BINARY:
return new BinaryStatisticsImpl();
default:
return new ColumnStatisticsImpl();
}
}
public static ColumnStatisticsImpl deserialize(TypeDescription schema,
OrcProto.ColumnStatistics stats) {
return deserialize(schema, stats, true, true);
}
public static ColumnStatisticsImpl deserialize(TypeDescription schema,
OrcProto.ColumnStatistics stats,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
if (stats.hasBucketStatistics()) {
return new BooleanStatisticsImpl(stats);
} else if (stats.hasIntStatistics()) {
return new IntegerStatisticsImpl(stats);
} else if (stats.hasCollectionStatistics()) {
return new CollectionColumnStatisticsImpl(stats);
} else if (stats.hasDoubleStatistics()) {
return new DoubleStatisticsImpl(stats);
} else if (stats.hasStringStatistics()) {
return new StringStatisticsImpl(stats);
} else if (stats.hasDecimalStatistics()) {
if (schema != null &&
schema.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
return new Decimal64StatisticsImpl(schema.getScale(), stats);
} else {
return new DecimalStatisticsImpl(stats);
}
} else if (stats.hasDateStatistics()) {
return new DateStatisticsImpl(stats, writerUsedProlepticGregorian,
convertToProlepticGregorian);
} else if (stats.hasTimestampStatistics()) {
return schema == null ||
schema.getCategory() == TypeDescription.Category.TIMESTAMP ?
new TimestampStatisticsImpl(stats,
writerUsedProlepticGregorian, convertToProlepticGregorian) :
new TimestampInstantStatisticsImpl(stats,
writerUsedProlepticGregorian, convertToProlepticGregorian);
} else if(stats.hasBinaryStatistics()) {
return new BinaryStatisticsImpl(stats);
} else {
return new ColumnStatisticsImpl(stats);
}
}
}
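// Illustrative usage sketch (not part of the original file): statistics are created from a
// schema, updated as values are written, and round-tripped through the protobuf form. The
// schema and values below are hypothetical.
class ColumnStatisticsExample {
static ColumnStatisticsImpl roundTrip() {
TypeDescription schema = TypeDescription.fromString("bigint");
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(schema);
stats.increment();            // one non-null value
stats.updateInteger(42, 1);   // feeds min/max/sum
OrcProto.ColumnStatistics proto = stats.serialize().build();
return ColumnStatisticsImpl.deserialize(schema, proto);
}
}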
orc-main/java/core/src/java/org/apache/orc/impl/ConvertTreeReaderFactory.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
import org.apache.hadoop.hive.ql.io.filter.FilterContext;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.TypeDescription.Category;
import org.apache.orc.impl.reader.StripePlanner;
import org.apache.orc.impl.reader.tree.TypeReader;
import org.threeten.extra.chrono.HybridChronology;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.chrono.Chronology;
import java.time.chrono.IsoChronology;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.format.DateTimeParseException;
import java.time.format.SignStyle;
import java.time.temporal.ChronoField;
import java.util.EnumMap;
import java.util.TimeZone;
/**
* Convert ORC tree readers.
*/
public class ConvertTreeReaderFactory extends TreeReaderFactory {
/**
* Override methods like checkEncoding to pass-thru to the convert TreeReader.
*/
public static class ConvertTreeReader extends TreeReader {
TypeReader fromReader;
ConvertTreeReader(int columnId, TypeReader fromReader, Context context) throws IOException {
super(columnId, context);
this.fromReader = fromReader;
}
// The ordering of types here is used to determine which numeric types
// are common/convertible to one another. Probably better to rely on the
// ordering explicitly defined here than to assume that the enum values
// that were arbitrarily assigned in PrimitiveCategory work for our purposes.
private static EnumMap<TypeDescription.Category, Integer> numericTypes =
new EnumMap<>(TypeDescription.Category.class);
static {
registerNumericType(TypeDescription.Category.BOOLEAN, 1);
registerNumericType(TypeDescription.Category.BYTE, 2);
registerNumericType(TypeDescription.Category.SHORT, 3);
registerNumericType(TypeDescription.Category.INT, 4);
registerNumericType(TypeDescription.Category.LONG, 5);
registerNumericType(TypeDescription.Category.FLOAT, 6);
registerNumericType(TypeDescription.Category.DOUBLE, 7);
registerNumericType(TypeDescription.Category.DECIMAL, 8);
}
private static void registerNumericType(TypeDescription.Category kind, int level) {
numericTypes.put(kind, level);
}
static TreeReader getStringGroupTreeReader(int columnId,
TypeDescription fileType,
Context context) throws IOException {
switch (fileType.getCategory()) {
case STRING:
return new StringTreeReader(columnId, context);
case CHAR:
return new CharTreeReader(columnId, fileType.getMaxLength(), context);
case VARCHAR:
return new VarcharTreeReader(columnId, fileType.getMaxLength(), context);
default:
throw new RuntimeException("Unexpected type kind " + fileType.getCategory().name());
}
}
protected void assignStringGroupVectorEntry(BytesColumnVector bytesColVector,
int elementNum,
TypeDescription readerType,
byte[] bytes) {
assignStringGroupVectorEntry(bytesColVector,
elementNum, readerType, bytes, 0, bytes.length);
}
/*
* Assign a BytesColumnVector entry when we have a byte array, start, and
* length for the string group which can be (STRING, CHAR, VARCHAR).
*/
protected void assignStringGroupVectorEntry(BytesColumnVector bytesColVector,
int elementNum,
TypeDescription readerType,
byte[] bytes,
int start,
int length) {
switch (readerType.getCategory()) {
case STRING:
bytesColVector.setVal(elementNum, bytes, start, length);
break;
case CHAR:
int charAdjustedDownLen =
StringExpr.rightTrimAndTruncate(bytes, start, length, readerType.getMaxLength());
bytesColVector.setVal(elementNum, bytes, start, charAdjustedDownLen);
break;
case VARCHAR:
int varcharAdjustedDownLen =
StringExpr.truncate(bytes, start, length, readerType.getMaxLength());
bytesColVector.setVal(elementNum, bytes, start, varcharAdjustedDownLen);
break;
default:
throw new RuntimeException("Unexpected type kind " + readerType.getCategory().name());
}
}
protected void convertStringGroupVectorElement(BytesColumnVector bytesColVector,
int elementNum, TypeDescription readerType) {
switch (readerType.getCategory()) {
case STRING:
// No conversion needed.
break;
case CHAR:
int charLength = bytesColVector.length[elementNum];
int charAdjustedDownLen = StringExpr
.rightTrimAndTruncate(bytesColVector.vector[elementNum],
bytesColVector.start[elementNum], charLength,
readerType.getMaxLength());
if (charAdjustedDownLen < charLength) {
bytesColVector.length[elementNum] = charAdjustedDownLen;
}
break;
case VARCHAR:
int varcharLength = bytesColVector.length[elementNum];
int varcharAdjustedDownLen = StringExpr
.truncate(bytesColVector.vector[elementNum],
bytesColVector.start[elementNum], varcharLength,
readerType.getMaxLength());
if (varcharAdjustedDownLen < varcharLength) {
bytesColVector.length[elementNum] = varcharAdjustedDownLen;
}
break;
default:
throw new RuntimeException("Unexpected type kind " + readerType.getCategory().name());
}
}
private boolean isParseError;
/*
     * We do this because we want the various parse methods to return a primitive.
*
* @return true if there was a parse error in the last call to
* parseLongFromString, etc.
*/
protected boolean getIsParseError() {
return isParseError;
}
protected long parseLongFromString(String string) {
try {
long longValue = Long.parseLong(string);
isParseError = false;
return longValue;
} catch (NumberFormatException e) {
isParseError = true;
return 0;
}
}
protected float parseFloatFromString(String string) {
try {
float floatValue = Float.parseFloat(string);
isParseError = false;
return floatValue;
} catch (NumberFormatException e) {
isParseError = true;
return Float.NaN;
}
}
protected double parseDoubleFromString(String string) {
try {
double value = Double.parseDouble(string);
isParseError = false;
return value;
} catch (NumberFormatException e) {
isParseError = true;
return Double.NaN;
}
}
/**
     * @param string the string to parse
* @return the HiveDecimal parsed, or null if there was a parse error.
*/
protected HiveDecimal parseDecimalFromString(String string) {
try {
HiveDecimal value = HiveDecimal.create(string);
return value;
} catch (NumberFormatException e) {
return null;
}
}
private static final double MIN_LONG_AS_DOUBLE = -0x1p63;
/*
* We cannot store Long.MAX_VALUE as a double without losing precision. Instead, we store
* Long.MAX_VALUE + 1 == -Long.MIN_VALUE, and then offset all comparisons by 1.
*/
private static final double MAX_LONG_AS_DOUBLE_PLUS_ONE = 0x1p63;
public boolean doubleCanFitInLong(double doubleValue) {
// Borrowed from Guava DoubleMath.roundToLong except do not want dependency on Guava and we
// don't want to catch an exception.
return ((MIN_LONG_AS_DOUBLE - doubleValue < 1.0) &&
(doubleValue < MAX_LONG_AS_DOUBLE_PLUS_ONE));
}
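    // Concretely: 9.3e18 is rejected because it is at least 2^63 (~9.22e18),
    // -9.3e18 is rejected because it lies below -2^63, and any double from
    // -2^63 (inclusive) up to but not including 2^63 passes the check.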
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
// Pass-thru.
fromReader.checkEncoding(encoding);
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
// Pass-thru.
fromReader.startStripe(planner, readPhase);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
// Pass-thru.
fromReader.seek(index, readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
// Pass-thru.
fromReader.seek(index, readPhase);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
// Pass-thru.
fromReader.skipRows(items, readPhase);
}
/**
* Override this to use convertVector.
* Source and result are member variables in the subclass with the right
* type.
     * @param elementNum the row index within the batch to convert
     * @throws IOException if reading the underlying data fails
     */
public void setConvertVectorElement(int elementNum) throws IOException {
throw new RuntimeException("Expected this method to be overridden");
}
// Common code used by the conversion.
public void convertVector(ColumnVector fromColVector,
ColumnVector resultColVector,
final int batchSize) throws IOException {
resultColVector.reset();
if (fromColVector.isRepeating) {
resultColVector.isRepeating = true;
if (fromColVector.noNulls || !fromColVector.isNull[0]) {
setConvertVectorElement(0);
} else {
resultColVector.noNulls = false;
resultColVector.isNull[0] = true;
}
} else if (fromColVector.noNulls) {
for (int i = 0; i < batchSize; i++) {
setConvertVectorElement(i);
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!fromColVector.isNull[i]) {
setConvertVectorElement(i);
} else {
resultColVector.noNulls = false;
resultColVector.isNull[i] = true;
}
}
}
}
public void downCastAnyInteger(LongColumnVector longColVector, int elementNum,
TypeDescription readerType) {
downCastAnyInteger(longColVector, elementNum, longColVector.vector[elementNum], readerType);
}
public void downCastAnyInteger(LongColumnVector longColVector, int elementNum, long inputLong,
TypeDescription readerType) {
long[] vector = longColVector.vector;
long outputLong;
Category readerCategory = readerType.getCategory();
switch (readerCategory) {
case BOOLEAN:
// No data loss for boolean.
vector[elementNum] = inputLong == 0 ? 0 : 1;
return;
case BYTE:
outputLong = (byte) inputLong;
break;
case SHORT:
outputLong = (short) inputLong;
break;
case INT:
outputLong = (int) inputLong;
break;
case LONG:
// No data loss for long.
vector[elementNum] = inputLong;
return;
default:
throw new RuntimeException("Unexpected type kind " + readerCategory.name());
}
if (outputLong != inputLong) {
// Data loss.
longColVector.isNull[elementNum] = true;
longColVector.noNulls = false;
} else {
vector[elementNum] = outputLong;
}
}
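    // For instance, down-casting the value 300 to a BYTE reader type yields
    // (byte) 300 == 44, which differs from the input, so the element is nulled
    // out rather than silently truncated; 300 down-cast to SHORT or INT is
    // preserved unchanged.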
protected boolean integerDownCastNeeded(TypeDescription fileType, TypeDescription readerType) {
Integer fileLevel = numericTypes.get(fileType.getCategory());
Integer schemaLevel = numericTypes.get(readerType.getCategory());
return (schemaLevel.intValue() < fileLevel.intValue());
}
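    // Example: reading a file column of type LONG (level 5) with a reader
    // schema of INT (level 4) returns true, so AnyIntegerFromAnyIntegerTreeReader
    // applies downCastAnyInteger; widening INT to LONG returns false and the
    // values pass through untouched.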
}
private static TypeReader createFromInteger(int columnId,
TypeDescription fileType,
Context context) throws IOException {
switch (fileType.getCategory()) {
case BOOLEAN:
return new BooleanTreeReader(columnId, context);
case BYTE:
return new ByteTreeReader(columnId, context);
case SHORT:
return new ShortTreeReader(columnId, context);
case INT:
return new IntTreeReader(columnId, context);
case LONG:
return new LongTreeReader(columnId, context);
default:
throw new RuntimeException("Unexpected type kind " + fileType);
}
}
public static class AnyIntegerFromAnyIntegerTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private final boolean downCastNeeded;
AnyIntegerFromAnyIntegerTreeReader(
int columnId, TypeDescription fileType, TypeDescription readerType,
Context context) throws IOException {
super(columnId, createFromInteger(columnId, fileType, context), context);
this.readerType = readerType;
downCastNeeded = integerDownCastNeeded(fileType, readerType);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
fromReader.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
LongColumnVector resultColVector = (LongColumnVector) previousVector;
if (downCastNeeded) {
if (resultColVector.isRepeating) {
if (resultColVector.noNulls || !resultColVector.isNull[0]) {
downCastAnyInteger(resultColVector, 0, readerType);
}
} else if (resultColVector.noNulls){
for (int i = 0; i < batchSize; i++) {
downCastAnyInteger(resultColVector, i, readerType);
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!resultColVector.isNull[i]) {
downCastAnyInteger(resultColVector, i, readerType);
}
}
}
}
}
}
public static class AnyIntegerFromDoubleTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private DoubleColumnVector doubleColVector;
private LongColumnVector longColVector;
AnyIntegerFromDoubleTreeReader(int columnId, TypeDescription fileType,
TypeDescription readerType, Context context)
throws IOException {
super(columnId, fileType.getCategory() == Category.DOUBLE ?
new DoubleTreeReader(columnId, context) :
new FloatTreeReader(columnId, context), context);
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
double doubleValue = doubleColVector.vector[elementNum];
if (!doubleCanFitInLong(doubleValue)) {
longColVector.isNull[elementNum] = true;
longColVector.noNulls = false;
} else {
downCastAnyInteger(longColVector, elementNum, (long) doubleValue, readerType);
}
}
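    // For example, 3.9 is truncated toward zero to 3 before the integer
    // down-cast, while 1.0e19 exceeds the long range, so the element is set
    // to null instead of wrapping around.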
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (doubleColVector == null) {
// Allocate column vector for file; cast column vector for reader.
doubleColVector = new DoubleColumnVector(batchSize);
longColVector = (LongColumnVector) previousVector;
} else {
doubleColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(doubleColVector, isNull, batchSize, filterContext, readPhase);
convertVector(doubleColVector, longColVector, batchSize);
}
}
public static class AnyIntegerFromDecimalTreeReader extends ConvertTreeReader {
private final int precision;
private final int scale;
private final TypeDescription readerType;
private DecimalColumnVector decimalColVector;
private LongColumnVector longColVector;
AnyIntegerFromDecimalTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, new DecimalTreeReader(columnId, fileType.getPrecision(),
fileType.getScale(), context), context);
this.precision = fileType.getPrecision();
this.scale = fileType.getScale();
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
HiveDecimalWritable decWritable = decimalColVector.vector[elementNum];
long[] vector = longColVector.vector;
Category readerCategory = readerType.getCategory();
// Check to see if the decimal will fit in the Hive integer data type.
// If not, set the element to null.
boolean isInRange;
switch (readerCategory) {
case BOOLEAN:
// No data loss for boolean.
vector[elementNum] = decWritable.signum() == 0 ? 0 : 1;
return;
case BYTE:
isInRange = decWritable.isByte();
break;
case SHORT:
isInRange = decWritable.isShort();
break;
case INT:
isInRange = decWritable.isInt();
break;
case LONG:
isInRange = decWritable.isLong();
break;
default:
throw new RuntimeException("Unexpected type kind " + readerCategory.name());
}
if (!isInRange) {
longColVector.isNull[elementNum] = true;
longColVector.noNulls = false;
} else {
switch (readerCategory) {
case BYTE:
vector[elementNum] = decWritable.byteValue();
break;
case SHORT:
vector[elementNum] = decWritable.shortValue();
break;
case INT:
vector[elementNum] = decWritable.intValue();
break;
case LONG:
vector[elementNum] = decWritable.longValue();
break;
default:
throw new RuntimeException("Unexpected type kind " + readerCategory.name());
}
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (decimalColVector == null) {
// Allocate column vector for file; cast column vector for reader.
decimalColVector = new DecimalColumnVector(batchSize, precision, scale);
longColVector = (LongColumnVector) previousVector;
} else {
decimalColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(decimalColVector, isNull, batchSize, filterContext, readPhase);
convertVector(decimalColVector, longColVector, batchSize);
}
}
public static class AnyIntegerFromStringGroupTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private BytesColumnVector bytesColVector;
private LongColumnVector longColVector;
AnyIntegerFromStringGroupTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, getStringGroupTreeReader(columnId, fileType, context), context);
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
String string = SerializationUtils.bytesVectorToString(bytesColVector, elementNum);
long longValue = parseLongFromString(string);
if (!getIsParseError()) {
downCastAnyInteger(longColVector, elementNum, longValue, readerType);
} else {
longColVector.noNulls = false;
longColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (bytesColVector == null) {
// Allocate column vector for file; cast column vector for reader.
bytesColVector = new BytesColumnVector(batchSize);
longColVector = (LongColumnVector) previousVector;
} else {
bytesColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(bytesColVector, isNull, batchSize, filterContext, readPhase);
convertVector(bytesColVector, longColVector, batchSize);
}
}
public static class AnyIntegerFromTimestampTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private TimestampColumnVector timestampColVector;
private LongColumnVector longColVector;
AnyIntegerFromTimestampTreeReader(int columnId, TypeDescription readerType,
Context context,
boolean instantType) throws IOException {
super(columnId, new TimestampTreeReader(columnId, context, instantType), context);
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) {
long millis = timestampColVector.asScratchTimestamp(elementNum).getTime();
long longValue = Math.floorDiv(millis, 1000);
downCastAnyInteger(longColVector, elementNum, longValue, readerType);
}
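    // Math.floorDiv keeps pre-epoch timestamps consistent: -500 ms maps to
    // second -1 (not 0, as truncating division would give), so
    // 1969-12-31 23:59:59.5 UTC converts to -1 rather than 0.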
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (timestampColVector == null) {
// Allocate column vector for file; cast column vector for reader.
timestampColVector = new TimestampColumnVector(batchSize);
longColVector = (LongColumnVector) previousVector;
} else {
timestampColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(timestampColVector, isNull, batchSize, filterContext, readPhase);
convertVector(timestampColVector, longColVector, batchSize);
}
}
public static class DoubleFromAnyIntegerTreeReader extends ConvertTreeReader {
private LongColumnVector longColVector;
private DoubleColumnVector doubleColVector;
DoubleFromAnyIntegerTreeReader(int columnId, TypeDescription fileType,
Context context) throws IOException {
super(columnId, createFromInteger(columnId, fileType, context), context);
}
@Override
public void setConvertVectorElement(int elementNum) {
double doubleValue = (double) longColVector.vector[elementNum];
if (!Double.isNaN(doubleValue)) {
doubleColVector.vector[elementNum] = doubleValue;
} else {
doubleColVector.vector[elementNum] = Double.NaN;
doubleColVector.noNulls = false;
doubleColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (longColVector == null) {
// Allocate column vector for file; cast column vector for reader.
longColVector = new LongColumnVector(batchSize);
doubleColVector = (DoubleColumnVector) previousVector;
} else {
longColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(longColVector, isNull, batchSize, filterContext, readPhase);
convertVector(longColVector, doubleColVector, batchSize);
}
}
public static class DoubleFromDecimalTreeReader extends ConvertTreeReader {
private final int precision;
private final int scale;
private DecimalColumnVector decimalColVector;
private DoubleColumnVector doubleColVector;
DoubleFromDecimalTreeReader(
int columnId, TypeDescription fileType, Context context) throws IOException {
super(columnId, new DecimalTreeReader(columnId, fileType.getPrecision(),
fileType.getScale(), context), context);
this.precision = fileType.getPrecision();
this.scale = fileType.getScale();
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
doubleColVector.vector[elementNum] =
decimalColVector.vector[elementNum].doubleValue();
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (decimalColVector == null) {
// Allocate column vector for file; cast column vector for reader.
decimalColVector = new DecimalColumnVector(batchSize, precision, scale);
doubleColVector = (DoubleColumnVector) previousVector;
} else {
decimalColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(decimalColVector, isNull, batchSize, filterContext, readPhase);
convertVector(decimalColVector, doubleColVector, batchSize);
}
}
public static class DoubleFromStringGroupTreeReader extends ConvertTreeReader {
private BytesColumnVector bytesColVector;
private DoubleColumnVector doubleColVector;
DoubleFromStringGroupTreeReader(int columnId, TypeDescription fileType, Context context)
throws IOException {
super(columnId, getStringGroupTreeReader(columnId, fileType, context), context);
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
String string = SerializationUtils.bytesVectorToString(bytesColVector, elementNum);
double doubleValue = parseDoubleFromString(string);
if (!getIsParseError()) {
doubleColVector.vector[elementNum] = doubleValue;
} else {
doubleColVector.noNulls = false;
doubleColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (bytesColVector == null) {
// Allocate column vector for file; cast column vector for reader.
bytesColVector = new BytesColumnVector(batchSize);
doubleColVector = (DoubleColumnVector) previousVector;
} else {
bytesColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(bytesColVector, isNull, batchSize, filterContext, readPhase);
convertVector(bytesColVector, doubleColVector, batchSize);
}
}
public static class DoubleFromTimestampTreeReader extends ConvertTreeReader {
private TimestampColumnVector timestampColVector;
private DoubleColumnVector doubleColVector;
DoubleFromTimestampTreeReader(int columnId, Context context,
boolean instantType) throws IOException {
super(columnId, new TimestampTreeReader(columnId, context, instantType), context);
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
Timestamp ts = timestampColVector.asScratchTimestamp(elementNum);
double result = Math.floorDiv(ts.getTime(), 1000);
int nano = ts.getNanos();
if (nano != 0) {
result += nano / 1_000_000_000.0;
}
doubleColVector.vector[elementNum] = result;
}
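    // Worked example: a timestamp 1.25 seconds before the epoch has
    // getTime() == -1250 and getNanos() == 750_000_000, so the result is
    // floorDiv(-1250, 1000) + 0.75 == -2 + 0.75 == -1.25 seconds, as expected.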
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (timestampColVector == null) {
// Allocate column vector for file; cast column vector for reader.
timestampColVector = new TimestampColumnVector(batchSize);
doubleColVector = (DoubleColumnVector) previousVector;
} else {
timestampColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(timestampColVector, isNull, batchSize, filterContext, readPhase);
convertVector(timestampColVector, doubleColVector, batchSize);
}
}
public static class FloatFromDoubleTreeReader extends ConvertTreeReader {
FloatFromDoubleTreeReader(int columnId, Context context) throws IOException {
super(columnId, new DoubleTreeReader(columnId, context), context);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
// Read present/isNull stream
fromReader.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
DoubleColumnVector vector = (DoubleColumnVector) previousVector;
if (previousVector.isRepeating) {
vector.vector[0] = (float) vector.vector[0];
} else {
for(int i=0; i < batchSize; ++i) {
vector.vector[i] = (float) vector.vector[i];
}
}
}
}
public static class DecimalFromAnyIntegerTreeReader extends ConvertTreeReader {
private LongColumnVector longColVector;
private ColumnVector decimalColVector;
private final HiveDecimalWritable value;
DecimalFromAnyIntegerTreeReader(int columnId, TypeDescription fileType, Context context)
throws IOException {
super(columnId, createFromInteger(columnId, fileType, context), context);
value = new HiveDecimalWritable();
}
@Override
public void setConvertVectorElement(int elementNum) {
long longValue = longColVector.vector[elementNum];
this.value.setFromLong(longValue);
// The DecimalColumnVector will enforce precision and scale and set the entry to null when out of bounds.
if (decimalColVector instanceof Decimal64ColumnVector) {
((Decimal64ColumnVector) decimalColVector).set(elementNum, value);
} else {
((DecimalColumnVector) decimalColVector).set(elementNum, value);
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (longColVector == null) {
// Allocate column vector for file; cast column vector for reader.
longColVector = new LongColumnVector(batchSize);
decimalColVector = previousVector;
} else {
longColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(longColVector, isNull, batchSize, filterContext, readPhase);
convertVector(longColVector, decimalColVector, batchSize);
}
}
public static class DecimalFromDoubleTreeReader extends ConvertTreeReader {
private DoubleColumnVector doubleColVector;
private ColumnVector decimalColVector;
DecimalFromDoubleTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context)
throws IOException {
super(columnId, fileType.getCategory() == Category.DOUBLE ?
new DoubleTreeReader(columnId, context) :
new FloatTreeReader(columnId, context), context);
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
HiveDecimal value =
HiveDecimal.create(Double.toString(doubleColVector.vector[elementNum]));
if (value != null) {
if (decimalColVector instanceof Decimal64ColumnVector) {
((Decimal64ColumnVector) decimalColVector).set(elementNum, value);
} else {
((DecimalColumnVector) decimalColVector).set(elementNum, value);
}
} else {
decimalColVector.noNulls = false;
decimalColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (doubleColVector == null) {
// Allocate column vector for file; cast column vector for reader.
doubleColVector = new DoubleColumnVector(batchSize);
decimalColVector = previousVector;
} else {
doubleColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(doubleColVector, isNull, batchSize, filterContext, readPhase);
convertVector(doubleColVector, decimalColVector, batchSize);
}
}
public static class DecimalFromStringGroupTreeReader extends ConvertTreeReader {
private BytesColumnVector bytesColVector;
private ColumnVector decimalColVector;
DecimalFromStringGroupTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, getStringGroupTreeReader(columnId, fileType, context), context);
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
String string = SerializationUtils.bytesVectorToString(bytesColVector, elementNum);
HiveDecimal value = parseDecimalFromString(string);
if (value != null) {
// The DecimalColumnVector will enforce precision and scale and set the entry to null when out of bounds.
if (decimalColVector instanceof Decimal64ColumnVector) {
((Decimal64ColumnVector) decimalColVector).set(elementNum, value);
} else {
((DecimalColumnVector) decimalColVector).set(elementNum, value);
}
} else {
decimalColVector.noNulls = false;
decimalColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (bytesColVector == null) {
// Allocate column vector for file; cast column vector for reader.
bytesColVector = new BytesColumnVector(batchSize);
decimalColVector = previousVector;
} else {
bytesColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(bytesColVector, isNull, batchSize, filterContext, readPhase);
convertVector(bytesColVector, decimalColVector, batchSize);
}
}
public static class DecimalFromTimestampTreeReader extends ConvertTreeReader {
private TimestampColumnVector timestampColVector;
private ColumnVector decimalColVector;
DecimalFromTimestampTreeReader(int columnId, Context context,
boolean instantType) throws IOException {
super(columnId, new TimestampTreeReader(columnId, context, instantType), context);
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
long seconds = Math.floorDiv(timestampColVector.time[elementNum], 1000);
long nanos = timestampColVector.nanos[elementNum];
if (seconds < 0 && nanos > 0) {
seconds += 1;
nanos = 1_000_000_000 - nanos;
}
BigDecimal secondsBd = new BigDecimal(seconds);
BigDecimal nanosBd = new BigDecimal(nanos).movePointLeft(9);
BigDecimal resultBd = (seconds >= 0L) ? secondsBd.add(nanosBd) : secondsBd.subtract(nanosBd);
HiveDecimal value = HiveDecimal.create(resultBd);
if (value != null) {
// The DecimalColumnVector will enforce precision and scale and set the entry to null when out of bounds.
if (decimalColVector instanceof Decimal64ColumnVector) {
((Decimal64ColumnVector) decimalColVector).set(elementNum, value);
} else {
((DecimalColumnVector) decimalColVector).set(elementNum, value);
}
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (timestampColVector == null) {
// Allocate column vector for file; cast column vector for reader.
timestampColVector = new TimestampColumnVector(batchSize);
decimalColVector = previousVector;
} else {
timestampColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(timestampColVector, isNull, batchSize, filterContext, readPhase);
convertVector(timestampColVector, decimalColVector, batchSize);
}
}
public static class DecimalFromDecimalTreeReader extends ConvertTreeReader {
private DecimalColumnVector fileDecimalColVector;
private int filePrecision;
private int fileScale;
private ColumnVector decimalColVector;
DecimalFromDecimalTreeReader(
int columnId, TypeDescription fileType, TypeDescription readerType, Context context)
throws IOException {
super(columnId, new DecimalTreeReader(columnId, fileType.getPrecision(),
fileType.getScale(), context), context);
filePrecision = fileType.getPrecision();
fileScale = fileType.getScale();
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
if (decimalColVector instanceof Decimal64ColumnVector) {
((Decimal64ColumnVector) decimalColVector).set(
elementNum, fileDecimalColVector.vector[elementNum]);
} else {
((DecimalColumnVector) decimalColVector).set(
elementNum, fileDecimalColVector.vector[elementNum]);
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (fileDecimalColVector == null) {
// Allocate column vector for file; cast column vector for reader.
fileDecimalColVector = new DecimalColumnVector(batchSize, filePrecision, fileScale);
decimalColVector = previousVector;
} else {
fileDecimalColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(fileDecimalColVector, isNull, batchSize, filterContext, readPhase);
convertVector(fileDecimalColVector, decimalColVector, batchSize);
}
}
public static class StringGroupFromAnyIntegerTreeReader extends ConvertTreeReader {
protected final TypeDescription readerType;
protected LongColumnVector longColVector;
protected BytesColumnVector bytesColVector;
StringGroupFromAnyIntegerTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, createFromInteger(columnId, fileType, context), context);
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) {
byte[] bytes = Long.toString(longColVector.vector[elementNum])
.getBytes(StandardCharsets.UTF_8);
assignStringGroupVectorEntry(bytesColVector, elementNum, readerType, bytes);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (longColVector == null) {
// Allocate column vector for file; cast column vector for reader.
longColVector = new LongColumnVector(batchSize);
bytesColVector = (BytesColumnVector) previousVector;
} else {
longColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(longColVector, isNull, batchSize, filterContext, readPhase);
convertVector(longColVector, bytesColVector, batchSize);
}
}
public static class StringGroupFromBooleanTreeReader extends StringGroupFromAnyIntegerTreeReader {
private static final byte[] TRUE_BYTES = "TRUE".getBytes(StandardCharsets.US_ASCII);
private static final byte[] FALSE_BYTES = "FALSE".getBytes(StandardCharsets.US_ASCII);
StringGroupFromBooleanTreeReader(int columnId, TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, fileType, readerType, context);
}
@Override
public void setConvertVectorElement(int elementNum) {
byte[] bytes = (longColVector.vector[elementNum] != 0 ? TRUE_BYTES : FALSE_BYTES);
assignStringGroupVectorEntry(bytesColVector, elementNum, readerType, bytes);
}
}
public static class StringGroupFromDoubleTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private DoubleColumnVector doubleColVector;
private BytesColumnVector bytesColVector;
StringGroupFromDoubleTreeReader(int columnId, TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, fileType.getCategory() == Category.DOUBLE ?
new DoubleTreeReader(columnId, context) :
new FloatTreeReader(columnId, context), context);
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) {
double doubleValue = doubleColVector.vector[elementNum];
if (!Double.isNaN(doubleValue)) {
String string = Double.toString(doubleValue);
byte[] bytes = string.getBytes(StandardCharsets.US_ASCII);
assignStringGroupVectorEntry(bytesColVector, elementNum, readerType, bytes);
} else {
bytesColVector.noNulls = false;
bytesColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (doubleColVector == null) {
// Allocate column vector for file; cast column vector for reader.
doubleColVector = new DoubleColumnVector(batchSize);
bytesColVector = (BytesColumnVector) previousVector;
} else {
doubleColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(doubleColVector, isNull, batchSize, filterContext, readPhase);
convertVector(doubleColVector, bytesColVector, batchSize);
}
}
public static class StringGroupFromDecimalTreeReader extends ConvertTreeReader {
private int precision;
private int scale;
private final TypeDescription readerType;
private DecimalColumnVector decimalColVector;
private BytesColumnVector bytesColVector;
private byte[] scratchBuffer;
StringGroupFromDecimalTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, new DecimalTreeReader(columnId, fileType.getPrecision(),
fileType.getScale(), context), context);
this.precision = fileType.getPrecision();
this.scale = fileType.getScale();
this.readerType = readerType;
scratchBuffer = new byte[HiveDecimal.SCRATCH_BUFFER_LEN_TO_BYTES];
}
@Override
public void setConvertVectorElement(int elementNum) {
HiveDecimalWritable decWritable = decimalColVector.vector[elementNum];
// Convert decimal into bytes instead of a String for better performance.
final int byteIndex = decWritable.toBytes(scratchBuffer);
assignStringGroupVectorEntry(
bytesColVector, elementNum, readerType,
scratchBuffer, byteIndex, HiveDecimal.SCRATCH_BUFFER_LEN_TO_BYTES - byteIndex);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (decimalColVector == null) {
// Allocate column vector for file; cast column vector for reader.
decimalColVector = new DecimalColumnVector(batchSize, precision, scale);
bytesColVector = (BytesColumnVector) previousVector;
} else {
decimalColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(decimalColVector, isNull, batchSize, filterContext, readPhase);
convertVector(decimalColVector, bytesColVector, batchSize);
}
}
/**
* The format for converting from/to string/date.
* Eg. "2019-07-09"
*/
static final DateTimeFormatter DATE_FORMAT =
new DateTimeFormatterBuilder()
.appendValue(ChronoField.YEAR, 4, 10, SignStyle.EXCEEDS_PAD)
.appendLiteral('-')
.appendValue(ChronoField.MONTH_OF_YEAR, 2)
.appendLiteral('-')
.appendValue(ChronoField.DAY_OF_MONTH, 2)
.toFormatter();
/**
* The format for converting from/to string/timestamp.
* Eg. "2019-07-09 13:11:00"
*/
static final DateTimeFormatter TIMESTAMP_FORMAT =
new DateTimeFormatterBuilder()
.append(DATE_FORMAT)
.appendLiteral(' ')
.appendValue(ChronoField.HOUR_OF_DAY, 2)
.appendLiteral(':')
.appendValue(ChronoField.MINUTE_OF_HOUR, 2)
.optionalStart()
.appendLiteral(':')
.appendValue(ChronoField.SECOND_OF_MINUTE, 2)
.optionalStart()
.appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
.toFormatter();
/**
* The format for converting from/to string/timestamp with local time zone.
* Eg. "2019-07-09 13:11:00 America/Los_Angeles"
*/
static final DateTimeFormatter INSTANT_TIMESTAMP_FORMAT =
new DateTimeFormatterBuilder()
.append(TIMESTAMP_FORMAT)
.appendPattern(" VV")
.toFormatter();
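  // A minimal usage sketch mirroring the readers below: an instant timestamp
  // string such as "2019-07-09 13:11:00 America/Los_Angeles" is parsed with
  //   Instant.from(INSTANT_TIMESTAMP_FORMAT.parse(text));
  // while the non-instant TIMESTAMP_FORMAT is first given a zone and a
  // chronology via withZone(...)/withChronology(...) before parsing or
  // formatting, as done in TimestampFromStringGroupTreeReader and
  // StringGroupFromTimestampTreeReader.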
static final long MIN_EPOCH_SECONDS = Instant.MIN.getEpochSecond();
static final long MAX_EPOCH_SECONDS = Instant.MAX.getEpochSecond();
/**
* Create an Instant from an entry in a TimestampColumnVector.
* It assumes that vector.isRepeating and null values have been handled
* before we get called.
* @param vector the timestamp column vector
* @param element the element number
* @return a timestamp Instant
*/
static Instant timestampToInstant(TimestampColumnVector vector, int element) {
return Instant.ofEpochSecond(Math.floorDiv(vector.time[element], 1000),
vector.nanos[element]);
}
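  // For example, time[element] == -500 and nanos[element] == 500_000_000
  // yield Instant.ofEpochSecond(-1, 500_000_000), i.e. half a second before
  // the epoch; floorDiv avoids rounding pre-epoch values toward zero.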
/**
   * Convert a decimal to an Instant using seconds and nanos.
   * @param vector the decimal column vector
* @param element the element number to use
* @param value the writable container to reuse
* @return the timestamp instant
*/
static Instant decimalToInstant(DecimalColumnVector vector, int element,
HiveDecimalWritable value) {
final HiveDecimalWritable writable = vector.vector[element];
final long seconds = writable.longValue();
if (seconds < MIN_EPOCH_SECONDS || seconds > MAX_EPOCH_SECONDS) {
return null;
} else {
// copy the value so that we can mutate it
value.set(writable);
value.mutateFractionPortion();
value.mutateScaleByPowerOfTen(9);
int nanos = (int) value.longValue();
return Instant.ofEpochSecond(seconds, nanos);
}
}
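  // Worked example: for the decimal 1234.5, longValue() gives 1234 seconds;
  // mutateFractionPortion() leaves 0.5 and mutateScaleByPowerOfTen(9) turns it
  // into 500_000_000, so the result is Instant.ofEpochSecond(1234, 500_000_000).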
public static class StringGroupFromTimestampTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private final ZoneId local;
private final DateTimeFormatter formatter;
private TimestampColumnVector timestampColVector;
private BytesColumnVector bytesColVector;
StringGroupFromTimestampTreeReader(int columnId, TypeDescription readerType,
Context context,
boolean instantType) throws IOException {
super(columnId, new TimestampTreeReader(columnId, context, instantType), context);
this.readerType = readerType;
local = context.getUseUTCTimestamp() ? ZoneId.of("UTC")
: ZoneId.systemDefault();
Chronology chronology = context.useProlepticGregorian()
? IsoChronology.INSTANCE : HybridChronology.INSTANCE;
formatter = (instantType ? INSTANT_TIMESTAMP_FORMAT : TIMESTAMP_FORMAT)
.withChronology(chronology);
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
String string = timestampToInstant(timestampColVector, elementNum).atZone(local)
.format(formatter);
byte[] bytes = string.getBytes(StandardCharsets.UTF_8);
assignStringGroupVectorEntry(bytesColVector, elementNum, readerType, bytes);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (timestampColVector == null) {
// Allocate column vector for file; cast column vector for reader.
timestampColVector = new TimestampColumnVector(batchSize);
bytesColVector = (BytesColumnVector) previousVector;
} else {
timestampColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(timestampColVector, isNull, batchSize, filterContext, readPhase);
convertVector(timestampColVector, bytesColVector, batchSize);
}
}
public static class StringGroupFromDateTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private DateColumnVector longColVector;
private BytesColumnVector bytesColVector;
private final boolean useProlepticGregorian;
StringGroupFromDateTreeReader(int columnId, TypeDescription readerType,
Context context) throws IOException {
super(columnId, new DateTreeReader(columnId, context), context);
this.readerType = readerType;
useProlepticGregorian = context.useProlepticGregorian();
}
@Override
public void setConvertVectorElement(int elementNum) {
String dateStr = DateUtils.printDate((int) (longColVector.vector[elementNum]),
useProlepticGregorian);
byte[] bytes = dateStr.getBytes(StandardCharsets.UTF_8);
assignStringGroupVectorEntry(bytesColVector, elementNum, readerType, bytes);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (longColVector == null) {
// Allocate column vector for file; cast column vector for reader.
longColVector = new DateColumnVector(batchSize);
bytesColVector = (BytesColumnVector) previousVector;
} else {
longColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(longColVector, isNull, batchSize, filterContext, readPhase);
convertVector(longColVector, bytesColVector, batchSize);
}
}
public static class StringGroupFromStringGroupTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
StringGroupFromStringGroupTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
super(columnId, getStringGroupTreeReader(columnId, fileType, context), context);
this.readerType = readerType;
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
fromReader.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
BytesColumnVector resultColVector = (BytesColumnVector) previousVector;
if (resultColVector.isRepeating) {
if (resultColVector.noNulls || !resultColVector.isNull[0]) {
convertStringGroupVectorElement(resultColVector, 0, readerType);
} else {
// Remains null.
}
} else if (resultColVector.noNulls){
for (int i = 0; i < batchSize; i++) {
convertStringGroupVectorElement(resultColVector, i, readerType);
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!resultColVector.isNull[i]) {
convertStringGroupVectorElement(resultColVector, i, readerType);
} else {
// Remains null.
}
}
}
}
}
public static class StringGroupFromBinaryTreeReader extends ConvertTreeReader {
private final TypeDescription readerType;
private BytesColumnVector inBytesColVector;
private BytesColumnVector outBytesColVector;
StringGroupFromBinaryTreeReader(int columnId, TypeDescription readerType,
Context context) throws IOException {
super(columnId, new BinaryTreeReader(columnId, context), context);
this.readerType = readerType;
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
byte[] bytes = inBytesColVector.vector[elementNum];
int start = inBytesColVector.start[elementNum];
int length = inBytesColVector.length[elementNum];
final byte[] string = (length == 0) ? ArrayUtils.EMPTY_BYTE_ARRAY : new byte[3 * length - 1];
for(int p = 0; p < string.length; p += 2) {
if (p != 0) {
string[p++] = ' ';
}
int num = 0xff & bytes[start++];
int digit = num / 16;
string[p] = (byte)((digit) + (digit < 10 ? '0' : 'a' - 10));
digit = num % 16;
string[p + 1] = (byte)((digit) + (digit < 10 ? '0' : 'a' - 10));
}
assignStringGroupVectorEntry(outBytesColVector, elementNum, readerType,
string, 0, string.length);
}
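    // Example: the two-byte binary value {0x1F, 0x02} becomes the 5-character
    // string "1f 02" (two lowercase hex digits per byte, separated by spaces,
    // hence the 3 * length - 1 buffer size).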
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (inBytesColVector == null) {
// Allocate column vector for file; cast column vector for reader.
inBytesColVector = new BytesColumnVector(batchSize);
outBytesColVector = (BytesColumnVector) previousVector;
} else {
inBytesColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(inBytesColVector, isNull, batchSize, filterContext, readPhase);
convertVector(inBytesColVector, outBytesColVector, batchSize);
}
}
public static class TimestampFromAnyIntegerTreeReader extends ConvertTreeReader {
private LongColumnVector longColVector;
private TimestampColumnVector timestampColVector;
private final boolean useUtc;
private final TimeZone local;
private final boolean fileUsedProlepticGregorian;
private final boolean useProlepticGregorian;
TimestampFromAnyIntegerTreeReader(int columnId, TypeDescription fileType,
Context context,
boolean isInstant) throws IOException {
super(columnId, createFromInteger(columnId, fileType, context), context);
this.useUtc = isInstant || context.getUseUTCTimestamp();
local = TimeZone.getDefault();
fileUsedProlepticGregorian = context.fileUsedProlepticGregorian();
useProlepticGregorian = context.useProlepticGregorian();
}
@Override
public void setConvertVectorElement(int elementNum) {
long millis = longColVector.vector[elementNum] * 1000;
timestampColVector.time[elementNum] = useUtc
? millis
: SerializationUtils.convertFromUtc(local, millis);
timestampColVector.nanos[elementNum] = 0;
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (longColVector == null) {
// Allocate column vector for file; cast column vector for reader.
longColVector = new LongColumnVector(batchSize);
timestampColVector = (TimestampColumnVector) previousVector;
} else {
longColVector.ensureSize(batchSize, false);
}
timestampColVector.changeCalendar(fileUsedProlepticGregorian, false);
// Read present/isNull stream
fromReader.nextVector(longColVector, isNull, batchSize, filterContext, readPhase);
convertVector(longColVector, timestampColVector, batchSize);
timestampColVector.changeCalendar(useProlepticGregorian, true);
}
}
public static class TimestampFromDoubleTreeReader extends ConvertTreeReader {
private DoubleColumnVector doubleColVector;
private TimestampColumnVector timestampColVector;
private final boolean useUtc;
private final TimeZone local;
private final boolean useProlepticGregorian;
private final boolean fileUsedProlepticGregorian;
TimestampFromDoubleTreeReader(int columnId, TypeDescription fileType,
TypeDescription readerType, Context context) throws IOException {
super(columnId, fileType.getCategory() == Category.DOUBLE ?
new DoubleTreeReader(columnId, context) :
new FloatTreeReader(columnId, context), context);
useUtc = readerType.getCategory() == Category.TIMESTAMP_INSTANT ||
context.getUseUTCTimestamp();
local = TimeZone.getDefault();
useProlepticGregorian = context.useProlepticGregorian();
fileUsedProlepticGregorian = context.fileUsedProlepticGregorian();
}
@Override
public void setConvertVectorElement(int elementNum) {
double seconds = doubleColVector.vector[elementNum];
if (!useUtc) {
seconds = SerializationUtils.convertFromUtc(local, seconds);
}
      // Guard against overflow when converting seconds to milliseconds.
double doubleMillis = seconds * 1000;
long millis = Math.round(doubleMillis);
if (doubleMillis > Long.MAX_VALUE || doubleMillis < Long.MIN_VALUE ||
((millis >= 0) != (doubleMillis >= 0))) {
timestampColVector.time[elementNum] = 0L;
timestampColVector.nanos[elementNum] = 0;
timestampColVector.isNull[elementNum] = true;
timestampColVector.noNulls = false;
} else {
timestampColVector.time[elementNum] = millis;
timestampColVector.nanos[elementNum] =
(int) Math.floorMod(millis, 1000) * 1_000_000;
}
}
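    // For example, 1.5 (seconds) becomes time == 1500 ms with nanos ==
    // 500_000_000, while a value whose millisecond representation overflows a
    // long (e.g. 1.0e17 seconds) is nulled out by the check above.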
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (doubleColVector == null) {
// Allocate column vector for file; cast column vector for reader.
doubleColVector = new DoubleColumnVector(batchSize);
timestampColVector = (TimestampColumnVector) previousVector;
} else {
doubleColVector.ensureSize(batchSize, false);
}
timestampColVector.changeCalendar(fileUsedProlepticGregorian, false);
// Read present/isNull stream
fromReader.nextVector(doubleColVector, isNull, batchSize, filterContext, readPhase);
convertVector(doubleColVector, timestampColVector, batchSize);
timestampColVector.changeCalendar(useProlepticGregorian, true);
}
}
public static class TimestampFromDecimalTreeReader extends ConvertTreeReader {
private final int precision;
private final int scale;
private DecimalColumnVector decimalColVector;
private TimestampColumnVector timestampColVector;
private final boolean useUtc;
private final TimeZone local;
private final boolean useProlepticGregorian;
private final boolean fileUsedProlepticGregorian;
private final HiveDecimalWritable value;
TimestampFromDecimalTreeReader(int columnId, TypeDescription fileType,
Context context,
boolean isInstant) throws IOException {
super(columnId, new DecimalTreeReader(columnId, fileType.getPrecision(),
fileType.getScale(), context), context);
this.precision = fileType.getPrecision();
this.scale = fileType.getScale();
useUtc = isInstant || context.getUseUTCTimestamp();
local = TimeZone.getDefault();
useProlepticGregorian = context.useProlepticGregorian();
fileUsedProlepticGregorian = context.fileUsedProlepticGregorian();
value = new HiveDecimalWritable();
}
@Override
public void setConvertVectorElement(int elementNum) {
Instant t = decimalToInstant(decimalColVector, elementNum, value);
if (t == null) {
timestampColVector.noNulls = false;
timestampColVector.isNull[elementNum] = true;
} else if (!useUtc) {
long millis = t.toEpochMilli();
timestampColVector.time[elementNum] =
SerializationUtils.convertFromUtc(local, millis);
timestampColVector.nanos[elementNum] = t.getNano();
} else {
timestampColVector.time[elementNum] = t.toEpochMilli();
timestampColVector.nanos[elementNum] = t.getNano();
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (decimalColVector == null) {
// Allocate column vector for file; cast column vector for reader.
decimalColVector = new DecimalColumnVector(batchSize, precision, scale);
timestampColVector = (TimestampColumnVector) previousVector;
} else {
decimalColVector.ensureSize(batchSize, false);
}
timestampColVector.changeCalendar(fileUsedProlepticGregorian, false);
// Read present/isNull stream
fromReader.nextVector(decimalColVector, isNull, batchSize, filterContext, readPhase);
convertVector(decimalColVector, timestampColVector, batchSize);
timestampColVector.changeCalendar(useProlepticGregorian, true);
}
}
public static class TimestampFromStringGroupTreeReader extends ConvertTreeReader {
private BytesColumnVector bytesColVector;
private TimestampColumnVector timestampColVector;
private final DateTimeFormatter formatter;
private final boolean useProlepticGregorian;
TimestampFromStringGroupTreeReader(int columnId, TypeDescription fileType,
Context context, boolean isInstant)
throws IOException {
super(columnId, getStringGroupTreeReader(columnId, fileType, context), context);
useProlepticGregorian = context.useProlepticGregorian();
Chronology chronology = useProlepticGregorian
? IsoChronology.INSTANCE
: HybridChronology.INSTANCE;
if (isInstant) {
formatter = INSTANT_TIMESTAMP_FORMAT.withChronology(chronology);
} else {
formatter = TIMESTAMP_FORMAT
.withZone(context.getUseUTCTimestamp() ?
ZoneId.of("UTC") :
ZoneId.systemDefault())
.withChronology(chronology);
}
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
String str = SerializationUtils.bytesVectorToString(bytesColVector,
elementNum);
try {
Instant instant = Instant.from(formatter.parse(str));
timestampColVector.time[elementNum] = instant.toEpochMilli();
timestampColVector.nanos[elementNum] = instant.getNano();
} catch (DateTimeParseException e) {
timestampColVector.noNulls = false;
timestampColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (bytesColVector == null) {
// Allocate column vector for file; cast column vector for reader.
bytesColVector = new BytesColumnVector(batchSize);
timestampColVector = (TimestampColumnVector) previousVector;
} else {
bytesColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(bytesColVector, isNull, batchSize, filterContext, readPhase);
convertVector(bytesColVector, timestampColVector, batchSize);
timestampColVector.changeCalendar(useProlepticGregorian, false);
}
}
public static class TimestampFromDateTreeReader extends ConvertTreeReader {
private DateColumnVector longColVector;
private TimestampColumnVector timestampColVector;
private final boolean useUtc;
private final TimeZone local = TimeZone.getDefault();
private final boolean useProlepticGregorian;
TimestampFromDateTreeReader(int columnId, TypeDescription readerType,
Context context) throws IOException {
super(columnId, new DateTreeReader(columnId, context), context);
useUtc = readerType.getCategory() == Category.TIMESTAMP_INSTANT ||
context.getUseUTCTimestamp();
useProlepticGregorian = context.useProlepticGregorian();
}
@Override
public void setConvertVectorElement(int elementNum) {
long days = longColVector.vector[elementNum];
long millis = days * 24 * 60 * 60 * 1000;
timestampColVector.time[elementNum] = useUtc ?
millis :
SerializationUtils.convertFromUtc(local, millis);
timestampColVector.nanos[elementNum] = 0;
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (longColVector == null) {
// Allocate column vector for file; cast column vector for reader.
longColVector = new DateColumnVector(batchSize);
timestampColVector = (TimestampColumnVector) previousVector;
} else {
longColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(longColVector, isNull, batchSize, filterContext, readPhase);
convertVector(longColVector, timestampColVector, batchSize);
timestampColVector.changeCalendar(useProlepticGregorian, false);
}
}
public static class DateFromStringGroupTreeReader extends ConvertTreeReader {
private BytesColumnVector bytesColVector;
private LongColumnVector longColVector;
private DateColumnVector dateColumnVector;
private final boolean useProlepticGregorian;
DateFromStringGroupTreeReader(int columnId, TypeDescription fileType, Context context)
throws IOException {
super(columnId, getStringGroupTreeReader(columnId, fileType, context), context);
useProlepticGregorian = context.useProlepticGregorian();
}
@Override
public void setConvertVectorElement(int elementNum) {
String stringValue =
SerializationUtils.bytesVectorToString(bytesColVector, elementNum);
Integer dateValue = DateUtils.parseDate(stringValue, useProlepticGregorian);
if (dateValue != null) {
longColVector.vector[elementNum] = dateValue;
} else {
longColVector.noNulls = false;
longColVector.isNull[elementNum] = true;
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (bytesColVector == null) {
// Allocate column vector for file; cast column vector for reader.
bytesColVector = new BytesColumnVector(batchSize);
longColVector = (LongColumnVector) previousVector;
if (longColVector instanceof DateColumnVector) {
dateColumnVector = (DateColumnVector) longColVector;
} else {
dateColumnVector = null;
if (useProlepticGregorian) {
throw new IllegalArgumentException("Can't use LongColumnVector with" +
" proleptic Gregorian dates.");
}
}
} else {
bytesColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(bytesColVector, isNull, batchSize, filterContext, readPhase);
convertVector(bytesColVector, longColVector, batchSize);
if (dateColumnVector != null) {
dateColumnVector.changeCalendar(useProlepticGregorian, false);
}
}
}
public static class DateFromTimestampTreeReader extends ConvertTreeReader {
private TimestampColumnVector timestampColVector;
private LongColumnVector longColVector;
private final ZoneId local;
private final boolean useProlepticGregorian;
DateFromTimestampTreeReader(int columnId, Context context,
boolean instantType) throws IOException {
super(columnId, new TimestampTreeReader(columnId, context, instantType), context);
boolean useUtc = instantType || context.getUseUTCTimestamp();
local = useUtc ? ZoneId.of("UTC") : ZoneId.systemDefault();
useProlepticGregorian = context.useProlepticGregorian();
}
@Override
public void setConvertVectorElement(int elementNum) throws IOException {
LocalDate day = LocalDate.from(
Instant.ofEpochSecond(timestampColVector.time[elementNum] / 1000,
timestampColVector.nanos[elementNum])
.atZone(local));
longColVector.vector[elementNum] = day.toEpochDay();
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (timestampColVector == null) {
// Allocate column vector for file; cast column vector for reader.
timestampColVector = new TimestampColumnVector(batchSize);
longColVector = (LongColumnVector) previousVector;
if (useProlepticGregorian && !(longColVector instanceof DateColumnVector)) {
throw new IllegalArgumentException("Can't use LongColumnVector with" +
" proleptic Gregorian dates.");
}
} else {
timestampColVector.ensureSize(batchSize, false);
}
// Read present/isNull stream
fromReader.nextVector(timestampColVector, isNull, batchSize, filterContext, readPhase);
convertVector(timestampColVector, longColVector, batchSize);
if (longColVector instanceof DateColumnVector) {
((DateColumnVector) longColVector)
.changeCalendar(useProlepticGregorian, false);
}
}
}
private static TypeReader createBooleanConvertTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from BOOLEAN to schema type.
//
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
if (fileType.getCategory() == readerType.getCategory()) {
throw new IllegalArgumentException("No conversion of type " +
readerType.getCategory() + " to self needed");
}
return new AnyIntegerFromAnyIntegerTreeReader(columnId, fileType, readerType,
context);
case FLOAT:
case DOUBLE:
return new DoubleFromAnyIntegerTreeReader(columnId, fileType, context);
case DECIMAL:
return new DecimalFromAnyIntegerTreeReader(columnId, fileType, context);
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromBooleanTreeReader(columnId, fileType, readerType,
context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampFromAnyIntegerTreeReader(columnId, fileType, context,
readerType.getCategory() == Category.TIMESTAMP_INSTANT);
// Not currently supported conversion(s):
case BINARY:
case DATE:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createAnyIntegerConvertTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from (BYTE, SHORT, INT, LONG) to schema type.
//
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
if (fileType.getCategory() == readerType.getCategory()) {
throw new IllegalArgumentException("No conversion of type " +
readerType.getCategory() + " to self needed");
}
return new AnyIntegerFromAnyIntegerTreeReader(columnId, fileType, readerType,
context);
case FLOAT:
case DOUBLE:
return new DoubleFromAnyIntegerTreeReader(columnId, fileType,
context);
case DECIMAL:
return new DecimalFromAnyIntegerTreeReader(columnId, fileType, context);
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromAnyIntegerTreeReader(columnId, fileType, readerType,
context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampFromAnyIntegerTreeReader(columnId, fileType, context,
readerType.getCategory() == Category.TIMESTAMP_INSTANT);
// Not currently supported conversion(s):
case BINARY:
case DATE:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createDoubleConvertTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from DOUBLE to schema type.
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
return new AnyIntegerFromDoubleTreeReader(columnId, fileType, readerType, context);
case FLOAT:
return new FloatFromDoubleTreeReader(columnId, context);
case DOUBLE:
return new FloatTreeReader(columnId, context);
case DECIMAL:
return new DecimalFromDoubleTreeReader(columnId, fileType, readerType, context);
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromDoubleTreeReader(columnId, fileType, readerType, context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampFromDoubleTreeReader(columnId, fileType, readerType, context);
// Not currently supported conversion(s):
case BINARY:
case DATE:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createDecimalConvertTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from DECIMAL to schema type.
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
return new AnyIntegerFromDecimalTreeReader(columnId, fileType, readerType, context);
case FLOAT:
case DOUBLE:
return new DoubleFromDecimalTreeReader(columnId, fileType, context);
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromDecimalTreeReader(columnId, fileType, readerType, context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampFromDecimalTreeReader(columnId, fileType, context,
readerType.getCategory() == Category.TIMESTAMP_INSTANT);
case DECIMAL:
return new DecimalFromDecimalTreeReader(columnId, fileType, readerType, context);
// Not currently supported conversion(s):
case BINARY:
case DATE:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createStringConvertTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from STRING to schema type.
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
return new AnyIntegerFromStringGroupTreeReader(columnId, fileType, readerType, context);
case FLOAT:
case DOUBLE:
return new DoubleFromStringGroupTreeReader(columnId, fileType, context);
case DECIMAL:
return new DecimalFromStringGroupTreeReader(columnId, fileType, readerType, context);
case CHAR:
case VARCHAR:
case STRING:
return new StringGroupFromStringGroupTreeReader(columnId, fileType, readerType, context);
case BINARY:
return new BinaryTreeReader(columnId, context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampFromStringGroupTreeReader(columnId, fileType, context,
readerType.getCategory() == Category.TIMESTAMP_INSTANT);
case DATE:
return new DateFromStringGroupTreeReader(columnId, fileType, context);
// Not currently supported conversion(s):
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createTimestampConvertTreeReader(int columnId,
TypeDescription fileType,
TypeDescription readerType,
Context context) throws IOException {
boolean isInstant = fileType.getCategory() == Category.TIMESTAMP_INSTANT;
// CONVERT from TIMESTAMP to schema type.
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
return new AnyIntegerFromTimestampTreeReader(columnId, readerType,
context, isInstant);
case FLOAT:
case DOUBLE:
return new DoubleFromTimestampTreeReader(columnId, context, isInstant);
case DECIMAL:
return new DecimalFromTimestampTreeReader(columnId, context, isInstant);
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromTimestampTreeReader(columnId, readerType,
context, isInstant);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampTreeReader(columnId, context, isInstant);
case DATE:
return new DateFromTimestampTreeReader(columnId, context, isInstant);
// Not currently supported conversion(s):
case BINARY:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createDateConvertTreeReader(int columnId,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from DATE to schema type.
switch (readerType.getCategory()) {
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromDateTreeReader(columnId, readerType, context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampFromDateTreeReader(columnId, readerType, context);
case DATE:
throw new IllegalArgumentException("No conversion of type " +
readerType.getCategory() + " to self needed");
// Not currently supported conversion(s):
case BOOLEAN:
case BYTE:
case FLOAT:
case SHORT:
case INT:
case LONG:
case DOUBLE:
case BINARY:
case DECIMAL:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
private static TypeReader createBinaryConvertTreeReader(int columnId,
TypeDescription readerType,
Context context) throws IOException {
// CONVERT from BINARY to schema type.
switch (readerType.getCategory()) {
case STRING:
case CHAR:
case VARCHAR:
return new StringGroupFromBinaryTreeReader(columnId, readerType, context);
case BINARY:
throw new IllegalArgumentException("No conversion of type " +
readerType.getCategory() + " to self needed");
// Not currently supported conversion(s):
case BOOLEAN:
case BYTE:
case FLOAT:
case SHORT:
case INT:
case LONG:
case DOUBLE:
case TIMESTAMP:
case TIMESTAMP_INSTANT:
case DECIMAL:
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
readerType.getCategory());
}
}
/**
* (Rules from Hive's PrimitiveObjectInspectorUtils conversion)
*
* To BOOLEAN, BYTE, SHORT, INT, LONG:
* Convert from (BOOLEAN, BYTE, SHORT, INT, LONG) with down cast if necessary.
* Convert from (FLOAT, DOUBLE) using type cast to long and down cast if necessary.
* Convert from DECIMAL from longValue and down cast if necessary.
* Convert from STRING using LazyLong.parseLong and down cast if necessary.
* Convert from (CHAR, VARCHAR) from Integer.parseLong and down cast if necessary.
* Convert from TIMESTAMP using timestamp getSeconds and down cast if necessary.
*
* AnyIntegerFromAnyIntegerTreeReader (written)
* AnyIntegerFromFloatTreeReader (written)
* AnyIntegerFromDoubleTreeReader (written)
* AnyIntegerFromDecimalTreeReader (written)
* AnyIntegerFromStringGroupTreeReader (written)
* AnyIntegerFromTimestampTreeReader (written)
*
* To FLOAT/DOUBLE:
* Convert from (BOOLEAN, BYTE, SHORT, INT, LONG) using cast
* Convert from FLOAT using cast
* Convert from DECIMAL using getDouble
* Convert from (STRING, CHAR, VARCHAR) using Double.parseDouble
* Convert from TIMESTAMP using timestamp getDouble
*
* FloatFromAnyIntegerTreeReader (existing)
* FloatFromDoubleTreeReader (written)
* FloatFromDecimalTreeReader (written)
* FloatFromStringGroupTreeReader (written)
*
* DoubleFromAnyIntegerTreeReader (existing)
* DoubleFromFloatTreeReader (existing)
* DoubleFromDecimalTreeReader (written)
* DoubleFromStringGroupTreeReader (written)
*
* To DECIMAL:
   * Convert from (BOOLEAN, BYTE, SHORT, INT, LONG) using HiveDecimal.create()
   * Convert from (FLOAT, DOUBLE) using HiveDecimal.create(string value)
* Convert from (STRING, CHAR, VARCHAR) using HiveDecimal.create(string value)
* Convert from TIMESTAMP using HiveDecimal.create(string value of timestamp getDouble)
*
* DecimalFromAnyIntegerTreeReader (existing)
* DecimalFromFloatTreeReader (existing)
* DecimalFromDoubleTreeReader (existing)
* DecimalFromStringGroupTreeReader (written)
*
* To STRING, CHAR, VARCHAR:
   * Convert from (BYTE, SHORT, INT, LONG) using string conversion
   * Convert from BOOLEAN using boolean (True/False) conversion
   * Convert from (FLOAT, DOUBLE) using string conversion
* Convert from DECIMAL using HiveDecimal.toString
* Convert from CHAR by stripping pads
* Convert from VARCHAR with value
* Convert from TIMESTAMP using Timestamp.toString
* Convert from DATE using Date.toString
* Convert from BINARY using Text.decode
*
* StringGroupFromAnyIntegerTreeReader (written)
* StringGroupFromBooleanTreeReader (written)
* StringGroupFromFloatTreeReader (written)
* StringGroupFromDoubleTreeReader (written)
* StringGroupFromDecimalTreeReader (written)
*
* String from Char/Varchar conversion
* Char from String/Varchar conversion
* Varchar from String/Char conversion
*
* StringGroupFromTimestampTreeReader (written)
* StringGroupFromDateTreeReader (written)
* StringGroupFromBinaryTreeReader *****
*
* To TIMESTAMP:
* Convert from (BOOLEAN, BYTE, SHORT, INT, LONG) using TimestampWritable.longToTimestamp
* Convert from (FLOAT, DOUBLE) using TimestampWritable.doubleToTimestamp
* Convert from DECIMAL using TimestampWritable.decimalToTimestamp
* Convert from (STRING, CHAR, VARCHAR) using string conversion
* Or, from DATE
*
* TimestampFromAnyIntegerTreeReader (written)
* TimestampFromFloatTreeReader (written)
* TimestampFromDoubleTreeReader (written)
* TimestampFromDecimalTreeReader (written)
* TimestampFromStringGroupTreeReader (written)
* TimestampFromDateTreeReader
*
*
* To DATE:
* Convert from (STRING, CHAR, VARCHAR) using string conversion.
* Or, from TIMESTAMP.
*
* DateFromStringGroupTreeReader (written)
* DateFromTimestampTreeReader (written)
*
* To BINARY:
* Convert from (STRING, CHAR, VARCHAR) using getBinaryFromText
*
* BinaryFromStringGroupTreeReader (written)
*
* (Notes from StructConverter)
*
* To STRUCT:
* Input must be data type STRUCT
* minFields = Math.min(numSourceFields, numTargetFields)
* Convert those fields
* Extra targetFields to NULL
*
* (Notes from ListConverter)
*
* To LIST:
* Input must be data type LIST
* Convert elements
*
* (Notes from MapConverter)
*
* To MAP:
* Input must be data type MAP
* Convert keys and values
*
* (Notes from UnionConverter)
*
* To UNION:
* Input must be data type UNION
* Convert value for tag
*
   * @param readerType the type that the reader is expected to produce
   * @param context the reader context, which supplies the schema evolution
   * @return a convert reader from the file type to the requested reader type
   * @throws IOException if the underlying readers cannot be created
*/
public static TypeReader createConvertTreeReader(TypeDescription readerType,
Context context) throws IOException {
final SchemaEvolution evolution = context.getSchemaEvolution();
TypeDescription fileType = evolution.getFileType(readerType.getId());
int columnId = fileType.getId();
switch (fileType.getCategory()) {
case BYTE:
case SHORT:
case INT:
case LONG:
return createAnyIntegerConvertTreeReader(columnId, fileType, readerType, context);
case BOOLEAN:
return createBooleanConvertTreeReader(columnId, fileType, readerType, context);
case FLOAT:
case DOUBLE:
return createDoubleConvertTreeReader(columnId, fileType, readerType, context);
case DECIMAL:
return createDecimalConvertTreeReader(columnId, fileType, readerType, context);
case STRING:
case CHAR:
case VARCHAR:
return createStringConvertTreeReader(columnId, fileType, readerType, context);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return createTimestampConvertTreeReader(columnId, fileType, readerType, context);
case DATE:
return createDateConvertTreeReader(columnId, readerType, context);
case BINARY:
return createBinaryConvertTreeReader(columnId, readerType, context);
// UNDONE: Complex conversions...
case STRUCT:
case LIST:
case MAP:
case UNION:
default:
throw new IllegalArgumentException("Unsupported type " +
fileType.getCategory());
}
}
public static boolean canConvert(TypeDescription fileType, TypeDescription readerType) {
Category readerTypeCategory = readerType.getCategory();
// We don't convert from any to complex.
switch (readerTypeCategory) {
case STRUCT:
case LIST:
case MAP:
case UNION:
return false;
default:
// Fall through.
}
// Now look for the few cases we don't convert from
switch (fileType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
case FLOAT:
case DOUBLE:
case DECIMAL:
switch (readerType.getCategory()) {
// Not currently supported conversion(s):
case BINARY:
case DATE:
return false;
default:
return true;
}
case STRING:
case CHAR:
case VARCHAR:
switch (readerType.getCategory()) {
// Not currently supported conversion(s):
// (None)
default:
return true;
}
case TIMESTAMP:
case TIMESTAMP_INSTANT:
switch (readerType.getCategory()) {
// Not currently supported conversion(s):
case BINARY:
return false;
default:
return true;
}
case DATE:
switch (readerType.getCategory()) {
// Not currently supported conversion(s):
case BOOLEAN:
case BYTE:
case FLOAT:
case SHORT:
case INT:
case LONG:
case DOUBLE:
case BINARY:
case DECIMAL:
return false;
default:
return true;
}
case BINARY:
switch (readerType.getCategory()) {
// Not currently supported conversion(s):
case BOOLEAN:
case BYTE:
case FLOAT:
case SHORT:
case INT:
case LONG:
case DOUBLE:
case TIMESTAMP:
case TIMESTAMP_INSTANT:
case DECIMAL:
return false;
default:
return true;
}
// We don't convert from complex to any.
case STRUCT:
case LIST:
case MAP:
case UNION:
return false;
default:
throw new IllegalArgumentException("Unsupported type " +
fileType.getCategory());
}
}
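  // Illustrative usage sketch (not part of the factory itself): canConvert only inspects
  // the two type categories, so for example
  //   canConvert(TypeDescription.createInt(), TypeDescription.createString())    -> true
  //   canConvert(TypeDescription.createInt(), TypeDescription.createBinary())    -> false
  //   canConvert(TypeDescription.createStruct(), TypeDescription.createStruct()) -> false
  // because conversions to or from complex types are not supported.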
}
| 97,047 | 37.058039 | 113 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/CryptoUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.InMemoryKeystore;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcProto;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.ServiceLoader;
import java.util.function.Consumer;
/**
* This class has routines to work with encryption within ORC files.
*/
public class CryptoUtils {
private static final int COLUMN_ID_LENGTH = 3;
private static final int KIND_LENGTH = 2;
private static final int STRIPE_ID_LENGTH = 3;
private static final int MIN_COUNT_BYTES = 8;
static final int MAX_COLUMN = 0xffffff;
static final int MAX_KIND = 0xffff;
static final int MAX_STRIPE = 0xffffff;
/**
* Update the unique IV for each stream within a single key.
* The top bytes are set with the column, stream kind, and stripe id and the
* lower 8 bytes are always 0.
* @param name the stream name
* @param stripeId the stripe id
*/
public static Consumer<byte[]> modifyIvForStream(StreamName name,
long stripeId) {
return modifyIvForStream(name.getColumn(), name.getKind(), stripeId);
}
/**
* Update the unique IV for each stream within a single key.
* The top bytes are set with the column, stream kind, and stripe id and the
* lower 8 bytes are always 0.
* @param columnId the column id
* @param kind the stream kind
* @param stripeId the stripe id
*/
public static Consumer<byte[]> modifyIvForStream(int columnId,
OrcProto.Stream.Kind kind,
long stripeId) {
if (columnId < 0 || columnId > MAX_COLUMN) {
throw new IllegalArgumentException("ORC encryption is limited to " +
MAX_COLUMN + " columns. Value = " + columnId);
}
int k = kind.getNumber();
if (k < 0 || k > MAX_KIND) {
throw new IllegalArgumentException("ORC encryption is limited to " +
MAX_KIND + " stream kinds. Value = " + k);
}
return (byte[] iv) -> {
// the rest of the iv is used for counting within the stream
if (iv.length - (COLUMN_ID_LENGTH + KIND_LENGTH + STRIPE_ID_LENGTH) < MIN_COUNT_BYTES) {
throw new IllegalArgumentException("Not enough space in the iv for the count");
}
iv[0] = (byte) (columnId >> 16);
iv[1] = (byte) (columnId >> 8);
iv[2] = (byte) columnId;
iv[COLUMN_ID_LENGTH] = (byte) (k >> 8);
iv[COLUMN_ID_LENGTH + 1] = (byte) (k);
modifyIvForStripe(stripeId).accept(iv);
};
}
/**
* Modify the IV for the given stripe id and make sure the low bytes are
* set to 0.
* @param stripeId the stripe id
*/
public static Consumer<byte[]> modifyIvForStripe(long stripeId) {
if (stripeId < 1 || stripeId > MAX_STRIPE) {
throw new IllegalArgumentException("ORC encryption is limited to " +
MAX_STRIPE + " stripes. Value = " +
stripeId);
}
return (byte[] iv) -> {
iv[COLUMN_ID_LENGTH + KIND_LENGTH] = (byte) (stripeId >> 16);
iv[COLUMN_ID_LENGTH + KIND_LENGTH + 1] = (byte) (stripeId >> 8);
iv[COLUMN_ID_LENGTH + KIND_LENGTH + 2] = (byte) stripeId;
clearCounter(iv);
};
}
/**
* Clear the counter part of the IV.
* @param iv the IV to modify
*/
public static void clearCounter(byte[] iv) {
for(int i= COLUMN_ID_LENGTH + KIND_LENGTH + STRIPE_ID_LENGTH; i < iv.length; ++i) {
iv[i] = 0;
}
}
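  /*
   * Illustrative IV layout (a sketch, assuming OrcProto.Stream.Kind.DATA has number 1):
   * for columnId = 5, kind = DATA and stripeId = 3, modifyIvForStream fills a 16-byte IV as
   *   [0, 0, 5,  0, 1,  0, 0, 3,  0, 0, 0, 0, 0, 0, 0, 0]
   *    column(3) kind(2) stripe(3) counter(8, cleared)
   * where the trailing bytes are left at zero for use as the block counter within the stream.
   */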
/** A cache for the key providers */
private static final Map<String, KeyProvider> keyProviderCache = new HashMap<>();
/**
* Create a KeyProvider.
* It will cache the result, so that only one provider of each kind will be
* created.
*
   * @param conf the configuration that selects the provider kind
   * @param random the random generator to use
   * @return the new KeyProvider, or null if no matching provider is found
*/
public static KeyProvider getKeyProvider(Configuration conf,
Random random) throws IOException {
String kind = OrcConf.KEY_PROVIDER.getString(conf);
String cacheKey = kind + "." + random.getClass().getName();
KeyProvider result = keyProviderCache.get(cacheKey);
if (result == null) {
ServiceLoader<KeyProvider.Factory> loader = ServiceLoader.load(KeyProvider.Factory.class);
for (KeyProvider.Factory factory : loader) {
result = factory.create(kind, conf, random);
if (result != null) {
keyProviderCache.put(cacheKey, result);
break;
}
}
}
return result;
}
public static class HadoopKeyProviderFactory implements KeyProvider.Factory {
@Override
public KeyProvider create(String kind,
Configuration conf,
Random random) throws IOException {
if ("hadoop".equals(kind)) {
return HadoopShimsFactory.get().getHadoopKeyProvider(conf, random);
} else if ("memory".equals(kind)) {
return new InMemoryKeystore(random);
}
return null;
}
}
}
| 6,056 | 35.487952 | 96 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/DataReaderProperties.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.OrcConf;
import java.util.function.Supplier;
public final class DataReaderProperties {
private final Supplier<FileSystem> fileSystemSupplier;
private final Path path;
private final FSDataInputStream file;
private final InStream.StreamOptions compression;
private final boolean zeroCopy;
private final int maxDiskRangeChunkLimit;
private final int minSeekSize;
private final double minSeekSizeTolerance;
private DataReaderProperties(Builder builder) {
this.fileSystemSupplier = builder.fileSystemSupplier;
this.path = builder.path;
this.file = builder.file;
this.compression = builder.compression;
this.zeroCopy = builder.zeroCopy;
this.maxDiskRangeChunkLimit = builder.maxDiskRangeChunkLimit;
this.minSeekSize = builder.minSeekSize;
this.minSeekSizeTolerance = builder.minSeekSizeTolerance;
}
public Supplier<FileSystem> getFileSystemSupplier() {
return fileSystemSupplier;
}
public Path getPath() {
return path;
}
public FSDataInputStream getFile() {
return file;
}
public InStream.StreamOptions getCompression() {
return compression;
}
public boolean getZeroCopy() {
return zeroCopy;
}
public int getMaxDiskRangeChunkLimit() {
return maxDiskRangeChunkLimit;
}
public static Builder builder() {
return new Builder();
}
public int getMinSeekSize() {
return minSeekSize;
}
public double getMinSeekSizeTolerance() {
return minSeekSizeTolerance;
}
public static class Builder {
private Supplier<FileSystem> fileSystemSupplier;
private Path path;
private FSDataInputStream file;
private InStream.StreamOptions compression;
private boolean zeroCopy;
private int maxDiskRangeChunkLimit =
(int) OrcConf.ORC_MAX_DISK_RANGE_CHUNK_LIMIT.getDefaultValue();
private int minSeekSize = (int) OrcConf.ORC_MIN_DISK_SEEK_SIZE.getDefaultValue();
private double minSeekSizeTolerance = (double) OrcConf.ORC_MIN_DISK_SEEK_SIZE_TOLERANCE
.getDefaultValue();
private Builder() {
}
public Builder withFileSystemSupplier(Supplier<FileSystem> supplier) {
this.fileSystemSupplier = supplier;
return this;
}
public Builder withFileSystem(FileSystem filesystem) {
this.fileSystemSupplier = () -> filesystem;
return this;
}
public Builder withPath(Path path) {
this.path = path;
return this;
}
public Builder withFile(FSDataInputStream file) {
this.file = file;
return this;
}
public Builder withCompression(InStream.StreamOptions value) {
this.compression = value;
return this;
}
public Builder withZeroCopy(boolean zeroCopy) {
this.zeroCopy = zeroCopy;
return this;
}
public Builder withMaxDiskRangeChunkLimit(int value) {
maxDiskRangeChunkLimit = value;
return this;
}
public Builder withMinSeekSize(int value) {
minSeekSize = value;
return this;
}
public Builder withMinSeekSizeTolerance(double value) {
minSeekSizeTolerance = value;
return this;
}
public DataReaderProperties build() {
if (fileSystemSupplier == null || path == null) {
throw new NullPointerException("Filesystem = " + fileSystemSupplier +
", path = " + path);
}
return new DataReaderProperties(this);
}
}
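  /*
   * Typical construction (illustrative only; `fs` and `path` are assumed to exist in the
   * caller's scope):
   *
   *   DataReaderProperties props = DataReaderProperties.builder()
   *       .withFileSystem(fs)
   *       .withPath(path)
   *       .withZeroCopy(false)
   *       .build();
   *
   * build() rejects a missing file system supplier or path with a NullPointerException.
   */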
}
| 4,398 | 26.841772 | 91 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/DateUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.threeten.extra.chrono.HybridChronology;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.chrono.IsoChronology;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.time.temporal.TemporalAccessor;
import java.util.concurrent.TimeUnit;
/**
* Conversion utilities from the hybrid Julian/Gregorian calendar to/from the
* proleptic Gregorian.
* <p>
* The semantics here are to hold the string representation constant and change
* the epoch offset rather than holding the instant in time constant and change
* the string representation.
* <p>
* These utilities will be fast for the common case (> 1582 AD), but slow
* for old dates.
*/
public class DateUtils {
private static final ZoneId UTC = ZoneId.of("UTC");
private static final ZoneId LOCAL = ZoneId.systemDefault();
private static final long SWITCHOVER_MILLIS;
private static final long SWITCHOVER_DAYS;
private static final DateTimeFormatter HYBRID_DATE_FORMAT =
ConvertTreeReaderFactory.DATE_FORMAT
.withChronology(HybridChronology.INSTANCE)
.withZone(UTC);
private static final DateTimeFormatter PROLEPTIC_DATE_FORMAT =
DateTimeFormatter.ISO_LOCAL_DATE
.withChronology(IsoChronology.INSTANCE)
.withZone(UTC);
private static final DateTimeFormatter HYBRID_UTC_TIME_FORMAT =
ConvertTreeReaderFactory.TIMESTAMP_FORMAT
.withChronology(HybridChronology.INSTANCE)
.withZone(UTC);
private static final DateTimeFormatter HYBRID_LOCAL_TIME_FORMAT =
ConvertTreeReaderFactory.TIMESTAMP_FORMAT
.withChronology(HybridChronology.INSTANCE)
.withZone(LOCAL);
private static final DateTimeFormatter PROLEPTIC_UTC_TIME_FORMAT =
ConvertTreeReaderFactory.TIMESTAMP_FORMAT
.withChronology(IsoChronology.INSTANCE)
.withZone(UTC);
private static final DateTimeFormatter PROLEPTIC_LOCAL_TIME_FORMAT =
ConvertTreeReaderFactory.TIMESTAMP_FORMAT
.withChronology(IsoChronology.INSTANCE)
.withZone(LOCAL);
static {
    // Get the first day on which the two calendars agree with each other (the Gregorian switchover).
SWITCHOVER_DAYS = LocalDate.from(HYBRID_DATE_FORMAT.parse("1582-10-15")).toEpochDay();
SWITCHOVER_MILLIS = TimeUnit.DAYS.toMillis(SWITCHOVER_DAYS);
}
/**
* Convert an epoch day from the hybrid Julian/Gregorian calendar to the
* proleptic Gregorian.
* @param hybrid day of epoch in the hybrid Julian/Gregorian
* @return day of epoch in the proleptic Gregorian
*/
public static int convertDateToProleptic(int hybrid) {
int proleptic = hybrid;
if (hybrid < SWITCHOVER_DAYS) {
String dateStr = HYBRID_DATE_FORMAT.format(LocalDate.ofEpochDay(proleptic));
proleptic = (int) LocalDate.from(PROLEPTIC_DATE_FORMAT.parse(dateStr)).toEpochDay();
}
return proleptic;
}
/**
* Convert an epoch day from the proleptic Gregorian calendar to the hybrid
* Julian/Gregorian.
* @param proleptic day of epoch in the proleptic Gregorian
* @return day of epoch in the hybrid Julian/Gregorian
*/
public static int convertDateToHybrid(int proleptic) {
int hybrid = proleptic;
if (proleptic < SWITCHOVER_DAYS) {
String dateStr = PROLEPTIC_DATE_FORMAT.format(LocalDate.ofEpochDay(proleptic));
hybrid = (int) LocalDate.from(HYBRID_DATE_FORMAT.parse(dateStr)).toEpochDay();
}
return hybrid;
}
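  /*
   * Illustrative behaviour (a sketch, not an exhaustive spec): both conversions are
   * identity functions for any day on or after the 1582-10-15 switchover, e.g.
   *
   *   int modern = (int) LocalDate.of(2000, 1, 1).toEpochDay();
   *   convertDateToProleptic(modern) == modern;   // no change after the switchover
   *
   * Only days before the switchover are re-interpreted, by formatting in one chronology
   * and re-parsing in the other, which keeps the printed date string constant.
   */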
/**
* Convert epoch millis from the hybrid Julian/Gregorian calendar to the
* proleptic Gregorian.
* @param hybrid millis of epoch in the hybrid Julian/Gregorian
* @param useUtc use UTC instead of local
* @return millis of epoch in the proleptic Gregorian
*/
public static long convertTimeToProleptic(long hybrid, boolean useUtc) {
long proleptic = hybrid;
if (hybrid < SWITCHOVER_MILLIS) {
if (useUtc) {
String dateStr = HYBRID_UTC_TIME_FORMAT.format(Instant.ofEpochMilli(hybrid));
proleptic = Instant.from(PROLEPTIC_UTC_TIME_FORMAT.parse(dateStr)).toEpochMilli();
} else {
String dateStr = HYBRID_LOCAL_TIME_FORMAT.format(Instant.ofEpochMilli(hybrid));
proleptic = Instant.from(PROLEPTIC_LOCAL_TIME_FORMAT.parse(dateStr)).toEpochMilli();
}
}
return proleptic;
}
/**
* Convert epoch millis from the proleptic Gregorian calendar to the hybrid
* Julian/Gregorian.
* @param proleptic millis of epoch in the proleptic Gregorian
* @param useUtc use UTC instead of local
* @return millis of epoch in the hybrid Julian/Gregorian
*/
public static long convertTimeToHybrid(long proleptic, boolean useUtc) {
long hybrid = proleptic;
if (proleptic < SWITCHOVER_MILLIS) {
if (useUtc) {
String dateStr = PROLEPTIC_UTC_TIME_FORMAT.format(Instant.ofEpochMilli(hybrid));
hybrid = Instant.from(HYBRID_UTC_TIME_FORMAT.parse(dateStr)).toEpochMilli();
} else {
String dateStr = PROLEPTIC_LOCAL_TIME_FORMAT.format(Instant.ofEpochMilli(hybrid));
hybrid = Instant.from(HYBRID_LOCAL_TIME_FORMAT.parse(dateStr)).toEpochMilli();
}
}
return hybrid;
}
public static int convertDate(int original,
boolean fromProleptic,
boolean toProleptic) {
if (fromProleptic != toProleptic) {
return toProleptic
? convertDateToProleptic(original)
: convertDateToHybrid(original);
} else {
return original;
}
}
public static long convertTime(long original,
boolean fromProleptic,
boolean toProleptic,
boolean useUtc) {
if (fromProleptic != toProleptic) {
return toProleptic
? convertTimeToProleptic(original, useUtc)
: convertTimeToHybrid(original, useUtc);
} else {
return original;
}
}
public static Integer parseDate(String date, boolean fromProleptic) {
try {
TemporalAccessor time =
(fromProleptic ? PROLEPTIC_DATE_FORMAT : HYBRID_DATE_FORMAT).parse(date);
return (int) LocalDate.from(time).toEpochDay();
} catch (DateTimeParseException e) {
return null;
}
}
public static String printDate(int date, boolean fromProleptic) {
return (fromProleptic ? PROLEPTIC_DATE_FORMAT : HYBRID_DATE_FORMAT)
.format(LocalDate.ofEpochDay(date));
}
public static DateTimeFormatter getTimeFormat(boolean useProleptic,
boolean useUtc) {
if (useProleptic) {
return useUtc ? PROLEPTIC_UTC_TIME_FORMAT : PROLEPTIC_LOCAL_TIME_FORMAT;
} else {
return useUtc ? HYBRID_UTC_TIME_FORMAT : HYBRID_LOCAL_TIME_FORMAT;
}
}
public static Long parseTime(String date, boolean fromProleptic, boolean useUtc) {
try {
TemporalAccessor time = getTimeFormat(fromProleptic, useUtc).parse(date);
return Instant.from(time).toEpochMilli();
} catch (DateTimeParseException e) {
return null;
}
}
public static String printTime(long millis, boolean fromProleptic,
boolean useUtc) {
return getTimeFormat(fromProleptic, useUtc).format(Instant.ofEpochMilli(millis));
}
private DateUtils() {
throw new UnsupportedOperationException();
}
}
| 8,277 | 36.972477 | 92 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/Dictionary.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
* Interface to define the dictionary used for encoding value in columns
* of specific types like string, char, varchar, etc.
*/
public interface Dictionary {
enum IMPL {
RBTREE,
HASH
}
int INITIAL_DICTIONARY_SIZE = 4096;
/**
* Traverse the whole dictionary and apply the action.
*/
void visit(Visitor visitor) throws IOException;
void clear();
/**
* Given the position index, return the original string before being encoded.
* The value of the Text in the Dictionary is copied into {@code result}.
*
* @param result the holder to copy the dictionary text into
* @param position the position where the key was added
*/
void getText(Text result, int position);
ByteBuffer getText(int position);
/**
* Given the position index, write the original string, before being encoded,
* to the OutputStream.
*
* @param out the output stream to which to write the data
* @param position the position where the key was originally added
* @return the number of byte written to the stream
* @throws IOException if an I/O error occurs
*/
int writeTo(OutputStream out, int position) throws IOException;
int add(byte[] bytes, int offset, int length);
int size();
long getSizeInBytes();
/**
* The information about each node.
*/
interface VisitorContext {
/**
* Get the position where the key was originally added.
* @return the number returned by add.
*/
int getOriginalPosition();
/**
* Write the bytes for the string to the given output stream.
* @param out the stream to write to.
* @throws IOException
*/
void writeBytes(OutputStream out) throws IOException;
/**
* Get the original string.
* @return the string
*/
Text getText();
/**
* Get the number of bytes.
* @return the string's length in bytes
*/
int getLength();
}
/**
* The interface for visitors.
*/
interface Visitor {
/**
* Called once for each node of the tree in sort order.
* @param context the information about each node
* @throws IOException
*/
void visit(VisitorContext context) throws IOException;
}
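  /*
   * Illustrative traversal (a sketch; `dict` and `out` are assumed to exist in the caller):
   * because Visitor has a single method, a lambda can dump the dictionary in sorted order
   * while remembering where each key was originally added:
   *
   *   dict.visit(context -> {
   *     int originalPosition = context.getOriginalPosition();
   *     context.writeBytes(out);   // bytes of the current key, visited in sorted order
   *   });
   */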
}
| 3,170 | 26.102564 | 79 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/DictionaryUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
public class DictionaryUtils {
private DictionaryUtils() {
// Utility class does nothing in constructor
}
/**
* Obtain the UTF8 string from the byteArray using the offset in index-array.
* @param result Container for the UTF8 String.
* @param position position in the keyOffsets
* @param keyOffsets starting offset of the key (in byte) in the byte array.
* @param byteArray storing raw bytes of all keys seen in dictionary
*/
public static void getTextInternal(Text result, int position,
DynamicIntArray keyOffsets, DynamicByteArray byteArray) {
int offset = keyOffsets.get(position);
int length;
if (position + 1 == keyOffsets.size()) {
length = byteArray.size() - offset;
} else {
length = keyOffsets.get(position + 1) - offset;
}
byteArray.setText(result, offset, length);
}
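  /*
   * Worked example (illustrative): with keyOffsets = [0, 3, 8] and byteArray.size() == 12,
   * position 0 covers bytes [0, 3), position 1 covers [3, 8), and the last position uses
   * byteArray.size() as its end, covering [8, 12). The same offset/length arithmetic is
   * shared by the ByteBuffer, OutputStream and equality helpers below.
   */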
/**
* Return a {@code ByteBuffer} containing the data at a certain offset within a
* {@code DynamicByteArray}.
*
* @param position position in the keyOffsets
* @param keyOffsets starting offset of the key (in byte) in the byte array
* @param byteArray storing raw bytes of all keys seen in dictionary
   * @return a ByteBuffer containing the bytes of the requested key
*/
public static ByteBuffer getTextInternal(int position, DynamicIntArray keyOffsets,
DynamicByteArray byteArray) {
final int offset = keyOffsets.get(position);
final int length;
if (position + 1 == keyOffsets.size()) {
length = byteArray.size() - offset;
} else {
length = keyOffsets.get(position + 1) - offset;
}
return byteArray.get(offset, length);
}
/**
* Write a UTF8 string from the byteArray, using the offset in index-array,
* into an OutputStream
*
* @param out the output stream
* @param position position in the keyOffsets
* @param keyOffsets starting offset of the key (in byte) in the byte array
* @param byteArray storing raw bytes of all keys seen in dictionary
* @return the number of bytes written to the output stream
* @throws IOException if an I/O error occurs
*/
public static int writeToTextInternal(OutputStream out, int position,
DynamicIntArray keyOffsets, DynamicByteArray byteArray)
throws IOException {
int offset = keyOffsets.get(position);
int length;
if (position + 1 == keyOffsets.size()) {
length = byteArray.size() - offset;
} else {
length = keyOffsets.get(position + 1) - offset;
}
byteArray.write(out, offset, length);
return length;
}
/**
* Compare a UTF8 string from the byteArray using the offset in index-array.
*
* @param bytes an array containing bytes to search for
* @param offset the offset in the array
* @param length the number of bytes to search for
* @param position position in the keyOffsets
* @param keyOffsets starting offset of the key (in byte) in the byte array
   * @param byteArray storing raw bytes of all keys seen in dictionary
* @return true if the text is equal to the value within the byteArray; false
* otherwise
*/
public static boolean equalsInternal(byte[] bytes, int offset, int length, int position,
DynamicIntArray keyOffsets, DynamicByteArray byteArray) {
final int byteArrayOffset = keyOffsets.get(position);
final int keyLength;
if (position + 1 == keyOffsets.size()) {
keyLength = byteArray.size() - byteArrayOffset;
} else {
keyLength = keyOffsets.get(position + 1) - byteArrayOffset;
}
return 0 == byteArray.compare(bytes, offset, length, byteArrayOffset,
keyLength);
}
}
| 4,576 | 36.826446 | 90 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/DirectDecompressionCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import java.io.IOException;
import java.nio.ByteBuffer;
public interface DirectDecompressionCodec extends CompressionCodec {
boolean isAvailable();
void directDecompress(ByteBuffer in, ByteBuffer out) throws IOException;
}
| 1,105 | 37.137931 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/DynamicByteArray.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import org.apache.orc.OrcConf;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* A class that is a growable array of bytes. Growth is managed in terms of
* chunks that are allocated when needed.
*/
public final class DynamicByteArray {
static final int DEFAULT_CHUNKSIZE = 32 * 1024;
static final int DEFAULT_NUM_CHUNKS = 128;
private final int chunkSize; // our allocation sizes
private byte[][] data; // the real data
private int length; // max set element index +1
private int initializedChunks = 0; // the number of chunks created
public DynamicByteArray() {
this(DEFAULT_NUM_CHUNKS, DEFAULT_CHUNKSIZE);
}
public DynamicByteArray(int numChunks, int chunkSize) {
if (chunkSize == 0) {
throw new IllegalArgumentException("bad chunksize");
}
this.chunkSize = chunkSize;
data = new byte[numChunks][];
}
/**
* Ensure that the given index is valid.
* Throws an exception if chunkIndex is negative.
*/
private void grow(int chunkIndex) {
if (chunkIndex < 0) {
throw new RuntimeException(String.format("chunkIndex overflow:%d. " +
"You can set %s=columnName, or %s=0 to turn off dictionary encoding.",
chunkIndex,
OrcConf.DIRECT_ENCODING_COLUMNS.getAttribute(),
OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute()));
}
if (chunkIndex >= initializedChunks) {
if (chunkIndex >= data.length) {
int newSize = Math.max(chunkIndex + 1, 2 * data.length);
data = Arrays.copyOf(data, newSize);
}
for (int i = initializedChunks; i <= chunkIndex; ++i) {
data[i] = new byte[chunkSize];
}
initializedChunks = chunkIndex + 1;
}
}
public byte get(int index) {
if (index >= length) {
throw new IndexOutOfBoundsException("Index " + index +
" is outside of 0.." +
(length - 1));
}
int i = index / chunkSize;
int j = index % chunkSize;
return data[i][j];
}
public void set(int index, byte value) {
int i = index / chunkSize;
int j = index % chunkSize;
grow(i);
if (index >= length) {
length = index + 1;
}
data[i][j] = value;
}
public int add(byte value) {
int i = length / chunkSize;
int j = length % chunkSize;
grow(i);
data[i][j] = value;
int result = length;
length += 1;
return result;
}
/**
* Copy a slice of a byte array into our buffer.
* @param value the array to copy from
* @param valueOffset the first location to copy from value
* @param valueLength the number of bytes to copy from value
* @return the offset of the start of the value
*/
public int add(byte[] value, int valueOffset, int valueLength) {
int i = length / chunkSize;
int j = length % chunkSize;
grow((length + valueLength) / chunkSize);
int remaining = valueLength;
while (remaining > 0) {
int size = Math.min(remaining, chunkSize - j);
System.arraycopy(value, valueOffset, data[i], j, size);
remaining -= size;
valueOffset += size;
i += 1;
j = 0;
}
int result = length;
length += valueLength;
return result;
}
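  /*
   * Illustrative use (a sketch): values are appended sequentially and addressed by the
   * offset that add() returns, e.g.
   *
   *   DynamicByteArray buffer = new DynamicByteArray();
   *   int start = buffer.add(bytes, 0, bytes.length);   // `bytes` assumed to exist
   *   Text text = new Text();
   *   buffer.setText(text, start, bytes.length);        // copies the same bytes back out
   *
   * Internally an offset maps to chunk = offset / chunkSize and index = offset % chunkSize,
   * so with the default 32 KiB chunks, offset 70000 lands in chunk 2 at index 4464.
   */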
/**
* Read the entire stream into this array.
* @param in the stream to read from
* @throws IOException
*/
public void readAll(InputStream in) throws IOException {
int currentChunk = length / chunkSize;
int currentOffset = length % chunkSize;
grow(currentChunk);
int currentLength = in.read(data[currentChunk], currentOffset,
chunkSize - currentOffset);
while (currentLength > 0) {
length += currentLength;
currentOffset = length % chunkSize;
if (currentOffset == 0) {
currentChunk = length / chunkSize;
grow(currentChunk);
}
currentLength = in.read(data[currentChunk], currentOffset,
chunkSize - currentOffset);
}
}
/**
* Byte compare a set of bytes against the bytes in this dynamic array.
* @param other source of the other bytes
* @param otherOffset start offset in the other array
* @param otherLength number of bytes in the other array
* @param ourOffset the offset in our array
* @param ourLength the number of bytes in our array
* @return negative for less, 0 for equal, positive for greater
*/
public int compare(byte[] other, int otherOffset, int otherLength,
int ourOffset, int ourLength) {
int currentChunk = ourOffset / chunkSize;
int currentOffset = ourOffset % chunkSize;
int maxLength = Math.min(otherLength, ourLength);
while (maxLength > 0 &&
other[otherOffset] == data[currentChunk][currentOffset]) {
otherOffset += 1;
currentOffset += 1;
if (currentOffset == chunkSize) {
currentChunk += 1;
currentOffset = 0;
}
maxLength -= 1;
}
if (maxLength == 0) {
return otherLength - ourLength;
}
int otherByte = 0xff & other[otherOffset];
int ourByte = 0xff & data[currentChunk][currentOffset];
return otherByte > ourByte ? 1 : -1;
}
/**
* Get the size of the array.
* @return the number of bytes in the array
*/
public int size() {
return length;
}
/**
* Clear the array to its original pristine state.
*/
public void clear() {
length = 0;
for(int i=0; i < data.length; ++i) {
data[i] = null;
}
initializedChunks = 0;
}
/**
* Set a text value from the bytes in this dynamic array.
* @param result the value to set
* @param offset the start of the bytes to copy
* @param length the number of bytes to copy
*/
public void setText(Text result, int offset, int length) {
result.clear();
int currentChunk = offset / chunkSize;
int currentOffset = offset % chunkSize;
int currentLength = Math.min(length, chunkSize - currentOffset);
while (length > 0) {
result.append(data[currentChunk], currentOffset, currentLength);
length -= currentLength;
currentChunk += 1;
currentOffset = 0;
currentLength = Math.min(length, chunkSize - currentOffset);
}
}
/**
* Write out a range of this dynamic array to an output stream.
* @param out the stream to write to
* @param offset the first offset to write
* @param length the number of bytes to write
* @throws IOException
*/
public void write(OutputStream out, int offset,
int length) throws IOException {
int currentChunk = offset / chunkSize;
int currentOffset = offset % chunkSize;
while (length > 0) {
int currentLength = Math.min(length, chunkSize - currentOffset);
out.write(data[currentChunk], currentOffset, currentLength);
length -= currentLength;
currentChunk += 1;
currentOffset = 0;
}
}
@Override
public String toString() {
    // Guard against an empty array; indexing the last element below requires length > 0.
    if (length == 0) {
      return "{}";
    }
    StringBuilder sb = new StringBuilder(length * 3);
    sb.append('{');
    int l = length - 1;
    for (int i = 0; i < l; i++) {
      sb.append(Integer.toHexString(get(i)));
      sb.append(',');
    }
    sb.append(Integer.toHexString(get(l)));
sb.append('}');
return sb.toString();
}
public void setByteBuffer(ByteBuffer result, int offset, int length) {
result.clear();
int currentChunk = offset / chunkSize;
int currentOffset = offset % chunkSize;
int currentLength = Math.min(length, chunkSize - currentOffset);
while (length > 0) {
result.put(data[currentChunk], currentOffset, currentLength);
length -= currentLength;
currentChunk += 1;
currentOffset = 0;
currentLength = Math.min(length, chunkSize - currentOffset);
}
}
/**
* Gets all the bytes of the array.
*
* @return Bytes of the array
*/
public byte[] get() {
byte[] result = null;
if (length > 0) {
int currentChunk = 0;
int currentOffset = 0;
int currentLength = Math.min(length, chunkSize);
int destOffset = 0;
result = new byte[length];
int totalLength = length;
while (totalLength > 0) {
System.arraycopy(data[currentChunk], currentOffset, result, destOffset, currentLength);
destOffset += currentLength;
totalLength -= currentLength;
currentChunk += 1;
currentOffset = 0;
currentLength = Math.min(totalLength, chunkSize - currentOffset);
}
}
return result;
}
public ByteBuffer get(int offset, int length) {
final int currentChunk = offset / chunkSize;
final int currentOffset = offset % chunkSize;
final int currentLength = Math.min(length, chunkSize - currentOffset);
if (currentLength == length) {
return ByteBuffer.wrap(data[currentChunk], currentOffset, length);
}
ByteBuffer bb = ByteBuffer.allocate(length);
setByteBuffer(bb, offset, length);
return (ByteBuffer) bb.flip();
}
/**
* Get the size of the buffers.
*/
public long getSizeInBytes() {
return (long) initializedChunks * chunkSize;
}
}
| 10,007 | 29.888889 | 95 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/DynamicIntArray.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.util.Arrays;
/**
* Dynamic int array that uses primitive types and chunks to avoid copying
* large number of integers when it resizes.
* <p>
* The motivation for this class is memory optimization, i.e. space efficient
* storage of potentially huge arrays without good a-priori size guesses.
* <p>
 * The API of this class is between a primitive array and an AbstractList. It's
* not a Collection implementation because it handles primitive types, but the
* API could be extended to support iterators and the like.
* <p>
* NOTE: Like standard Collection implementations/arrays, this class is not
* synchronized.
*/
public final class DynamicIntArray {
static final int DEFAULT_CHUNKSIZE = 8 * 1024;
static final int INIT_CHUNKS = 128;
private final int chunkSize; // our allocation size
private int[][] data; // the real data
private int length; // max set element index +1
private int initializedChunks = 0; // the number of created chunks
public DynamicIntArray() {
this(DEFAULT_CHUNKSIZE);
}
public DynamicIntArray(int chunkSize) {
this.chunkSize = chunkSize;
data = new int[INIT_CHUNKS][];
}
/**
* Ensure that the given index is valid.
*/
private void grow(int chunkIndex) {
if (chunkIndex >= initializedChunks) {
if (chunkIndex >= data.length) {
int newSize = Math.max(chunkIndex + 1, 2 * data.length);
data = Arrays.copyOf(data, newSize);
}
for (int i=initializedChunks; i <= chunkIndex; ++i) {
data[i] = new int[chunkSize];
}
initializedChunks = chunkIndex + 1;
}
}
public int get(int index) {
if (index >= length) {
throw new IndexOutOfBoundsException("Index " + index +
" is outside of 0.." +
(length - 1));
}
int i = index / chunkSize;
int j = index % chunkSize;
return data[i][j];
}
public void set(int index, int value) {
int i = index / chunkSize;
int j = index % chunkSize;
grow(i);
if (index >= length) {
length = index + 1;
}
data[i][j] = value;
}
public void increment(int index, int value) {
int i = index / chunkSize;
int j = index % chunkSize;
grow(i);
if (index >= length) {
length = index + 1;
}
data[i][j] += value;
}
public void add(int value) {
int i = length / chunkSize;
int j = length % chunkSize;
grow(i);
data[i][j] = value;
length += 1;
}
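  /*
   * Illustrative use (a sketch): elements are appended with add() and addressed by index,
   * and storage grows one chunk at a time, e.g.
   *
   *   DynamicIntArray offsets = new DynamicIntArray();
   *   offsets.add(42);
   *   offsets.get(0);            // 42
   *   offsets.getSizeInBytes();  // 4 * 1 * 8192 = 32768 with the default chunk size
   *
   * because a single add() initializes exactly one 8K-int chunk.
   */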
public int size() {
return length;
}
public void clear() {
length = 0;
for(int i=0; i < data.length; ++i) {
data[i] = null;
}
initializedChunks = 0;
}
@Override
public String toString() {
int l = length - 1;
if (l == -1) {
return "{}";
}
StringBuilder sb = new StringBuilder(length * 4);
sb.append('{');
for (int i = 0; i <= l; i++) {
sb.append(get(i));
if (i != l) {
sb.append(",");
}
}
return sb.append('}').toString();
}
public int getSizeInBytes() {
return 4 * initializedChunks * chunkSize;
}
}
| 4,006 | 26.634483 | 78 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/HadoopShimsFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.InvocationTargetException;
/**
* The factory for getting the proper version of the Hadoop shims.
*/
public class HadoopShimsFactory {
private static final Logger LOG = LoggerFactory.getLogger(HadoopShimsFactory.class);
private static final String CURRENT_SHIM_NAME =
"org.apache.orc.impl.HadoopShimsCurrent";
private static final String PRE_2_6_SHIM_NAME =
"org.apache.orc.impl.HadoopShimsPre2_6";
private static final String PRE_2_7_SHIM_NAME =
"org.apache.orc.impl.HadoopShimsPre2_7";
private static HadoopShims SHIMS = null;
private static HadoopShims createShimByName(String name) {
try {
Class<? extends HadoopShims> cls =
(Class<? extends HadoopShims>) Class.forName(name);
return cls.getDeclaredConstructor().newInstance();
} catch (ClassNotFoundException | NoSuchMethodException | SecurityException |
InstantiationException | IllegalAccessException | IllegalArgumentException |
InvocationTargetException e) {
throw new IllegalStateException("Can't create shims for " + name, e);
}
}
public static synchronized HadoopShims get() {
if (SHIMS == null) {
String[] versionParts = VersionInfo.getVersion().split("[.]");
int major = Integer.parseInt(versionParts[0]);
int minor = Integer.parseInt(versionParts[1]);
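      // Illustrative mapping (version strings assumed): "2.2.0" selects
      // HadoopShimsPre2_3, "2.6.5" selects HadoopShimsPre2_7, and "3.3.4"
      // selects HadoopShimsCurrent via the branches below.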
if (major < 2 || (major == 2 && minor < 7)) {
LOG.warn("Hadoop " + VersionInfo.getVersion() + " support is deprecated. " +
"Please upgrade to Hadoop 2.7.3 or above.");
}
if (major < 2 || (major == 2 && minor < 3)) {
SHIMS = new HadoopShimsPre2_3();
} else if (major == 2 && minor < 6) {
SHIMS = createShimByName(PRE_2_6_SHIM_NAME);
} else if (major == 2 && minor < 7) {
SHIMS = createShimByName(PRE_2_7_SHIM_NAME);
} else {
SHIMS = createShimByName(CURRENT_SHIM_NAME);
}
}
return SHIMS;
}
}
| 2,903 | 37.210526 | 89 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/IOUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
/**
* This is copied from commons-io project to cut the dependency
* from old Hadoop.
*/
public final class IOUtils {
public static final int DEFAULT_BUFFER_SIZE = 8192;
/**
* Returns a new byte array of size {@link #DEFAULT_BUFFER_SIZE}.
*
* @return a new byte array of size {@link #DEFAULT_BUFFER_SIZE}.
* @since 2.9.0
*/
public static byte[] byteArray() {
return byteArray(DEFAULT_BUFFER_SIZE);
}
/**
* Returns a new byte array of the given size.
*
* TODO Consider guarding or warning against large allocations...
*
* @param size array size.
* @return a new byte array of the given size.
* @since 2.9.0
*/
public static byte[] byteArray(final int size) {
return new byte[size];
}
/**
* Internal byte array buffer.
*/
private static final ThreadLocal<byte[]> SKIP_BYTE_BUFFER =
ThreadLocal.withInitial(IOUtils::byteArray);
/**
* Gets the thread local byte array.
*
* @return the thread local byte array.
*/
static byte[] getByteArray() {
return SKIP_BYTE_BUFFER.get();
}
/**
* Skips the requested number of bytes or fail if there are not enough left.
* <p>
* This allows for the possibility that {@link InputStream#skip(long)} may
* not skip as many bytes as requested (most likely because of reaching EOF).
* <p>
* Note that the implementation uses {@link #skip(InputStream, long)}.
   * This means that the method may be considerably less efficient than using the actual skip implementation;
   * this is done to guarantee that the correct number of bytes are skipped.
* </p>
*
* @param input stream to skip
* @param toSkip the number of bytes to skip
* @throws IOException if there is a problem reading the file
* @throws IllegalArgumentException if toSkip is negative
* @throws EOFException if the number of bytes skipped was incorrect
* @see InputStream#skip(long)
* @since 2.0
*/
public static void skipFully(final InputStream input, final long toSkip)
throws IOException {
if (toSkip < 0) {
throw new IllegalArgumentException("Bytes to skip must not be negative: " + toSkip);
}
final long skipped = skip(input, toSkip);
if (skipped != toSkip) {
throw new EOFException("Bytes to skip: " + toSkip + " actual: " + skipped);
}
}
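  // Hypothetical usage sketch (stream and size assumed): skip a fixed-size
  // header and fail fast if the stream is shorter than expected.
  //   InputStream in = ...;
  //   IOUtils.skipFully(in, 16); // throws EOFException if fewer than 16 bytes remain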
/**
* Skips bytes from an input byte stream.
* This implementation guarantees that it will read as many bytes
* as possible before giving up; this may not always be the case for
* skip() implementations in subclasses of {@link InputStream}.
* <p>
* Note that the implementation uses {@link InputStream#read(byte[], int, int)} rather
* than delegating to {@link InputStream#skip(long)}.
   * This means that the method may be considerably less efficient than using the actual skip implementation;
   * this is done to guarantee that the correct number of bytes are skipped.
* </p>
*
* @param input byte stream to skip
* @param toSkip number of bytes to skip.
* @return number of bytes actually skipped.
* @throws IOException if there is a problem reading the file
* @throws IllegalArgumentException if toSkip is negative
* @see <a href="https://issues.apache.org/jira/browse/IO-203">IO-203 - Add skipFully() method for InputStreams</a>
* @since 2.0
*/
public static long skip(final InputStream input, final long toSkip) throws IOException {
if (toSkip < 0) {
throw new IllegalArgumentException("Skip count must be non-negative, actual: " + toSkip);
}
/*
* N.B. no need to synchronize access to SKIP_BYTE_BUFFER: - we don't care if the buffer is created multiple
     * times (the data is ignored) - we always use the same size buffer, so if it is recreated it will still be
     * OK (if the buffer size were variable, we would need to synchronize to ensure some other thread did not create a
* smaller one)
*/
long remain = toSkip;
while (remain > 0) {
// See https://issues.apache.org/jira/browse/IO-203 for why we use read() rather than delegating to skip()
final byte[] byteArray = getByteArray();
final long n = input.read(byteArray, 0, (int) Math.min(remain, byteArray.length));
if (n < 0) { // EOF
break;
}
remain -= n;
}
return toSkip - remain;
}
}
| 5,305 | 35.847222 | 117 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/InStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import com.google.protobuf.CodedInputStream;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import org.apache.orc.CompressionCodec;
import org.apache.orc.EncryptionAlgorithm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.crypto.Cipher;
import javax.crypto.ShortBufferException;
import javax.crypto.spec.IvParameterSpec;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.Key;
import java.util.function.Consumer;
public abstract class InStream extends InputStream {
private static final Logger LOG = LoggerFactory.getLogger(InStream.class);
public static final int PROTOBUF_MESSAGE_MAX_LIMIT = 1024 << 20; // 1GB
protected final Object name;
protected final long offset;
protected long length;
protected DiskRangeList bytes;
// position in the stream (0..length)
protected long position;
public InStream(Object name, long offset, long length) {
this.name = name;
this.offset = offset;
this.length = length;
}
@Override
public String toString() {
return name.toString();
}
@Override
public abstract void close();
/**
* Set the current range
* @param newRange the block that is current
* @param isJump if this was a seek instead of a natural read
*/
protected abstract void setCurrent(DiskRangeList newRange,
boolean isJump);
/**
* Reset the input to a new set of data.
* @param input the input data
*/
protected void reset(DiskRangeList input) {
bytes = input;
while (input != null &&
(input.getEnd() <= offset ||
input.getOffset() > offset + length)) {
input = input.next;
}
if (input == null || input.getOffset() <= offset) {
position = 0;
} else {
position = input.getOffset() - offset;
}
setCurrent(input, true);
}
/**
* Reset the input to a new set of data with a different length.
*
   * In some cases, after resetting an UncompressedStream, its actual length is longer than its initial length.
* Prior to ORC-516, InStream.UncompressedStream class had the 'length' field and the length was modifiable in
* the reset() method. It was used in SettableUncompressedStream class in setBuffers() method.
* SettableUncompressedStream was passing 'diskRangeInfo.getTotalLength()' as the length to the reset() method.
* SettableUncompressedStream had been removed from ORC code base, but it is required for Apache Hive and
* Apache Hive manages its own copy of SettableUncompressedStream since upgrading its Apache ORC version to 1.6.7.
* ORC-516 was the root cause of the regression reported in HIVE-27128 - EOFException when reading DATA stream.
* This wrapper method allows to resolve HIVE-27128.
*
* @param input the input data
* @param length new length of the stream
*/
protected void reset(DiskRangeList input, long length) {
this.length = length;
reset(input);
}
public abstract void changeIv(Consumer<byte[]> modifier);
static int getRangeNumber(DiskRangeList list, DiskRangeList current) {
int result = 0;
DiskRangeList range = list;
while (range != null && range != current) {
result += 1;
range = range.next;
}
return result;
}
/**
* Implements a stream over an uncompressed stream.
*/
public static class UncompressedStream extends InStream {
protected ByteBuffer decrypted;
protected DiskRangeList currentRange;
protected long currentOffset;
/**
* Create the stream without calling reset on it.
* This is used for the subclass that needs to do more setup.
* @param name name of the stream
* @param length the number of bytes for the stream
*/
public UncompressedStream(Object name, long offset, long length) {
super(name, offset, length);
}
public UncompressedStream(Object name,
DiskRangeList input,
long offset,
long length) {
super(name, offset, length);
reset(input, length);
}
@Override
public int read() {
if (decrypted == null || decrypted.remaining() == 0) {
if (position == length) {
return -1;
}
setCurrent(currentRange.next, false);
}
position += 1;
return 0xff & decrypted.get();
}
@Override
protected void setCurrent(DiskRangeList newRange, boolean isJump) {
currentRange = newRange;
if (newRange != null) {
// copy the buffer so that we don't change the BufferChunk
decrypted = newRange.getData().slice();
currentOffset = newRange.getOffset();
// Move the position in the ByteBuffer to match the currentOffset,
// which is relative to the stream.
int start = (int) (position + offset - currentOffset);
decrypted.position(start);
// make sure the end of the buffer doesn't go past our stream
decrypted.limit(start + (int) Math.min(decrypted.remaining(),
length - position));
}
}
@Override
public int read(byte[] data, int offset, int length) {
if (decrypted == null || decrypted.remaining() == 0) {
if (position == this.length) {
return -1;
}
setCurrent(currentRange.next, false);
}
int actualLength = Math.min(length, decrypted.remaining());
decrypted.get(data, offset, actualLength);
position += actualLength;
return actualLength;
}
@Override
public int available() {
if (decrypted != null && decrypted.remaining() > 0) {
return decrypted.remaining();
}
return (int) (length - position);
}
@Override
public void close() {
currentRange = null;
position = length;
// explicit de-ref of bytes[]
decrypted = null;
bytes = null;
}
@Override
public void changeIv(Consumer<byte[]> modifier) {
// nothing to do
}
@Override
public void seek(PositionProvider index) throws IOException {
seek(index.getNext());
}
public void seek(long desired) throws IOException {
if (desired == 0 && bytes == null) {
return;
}
// compute the position of the desired point in file
long positionFile = desired + offset;
// If we are seeking inside of the current range, just reposition.
if (currentRange != null && positionFile >= currentRange.getOffset() &&
positionFile < currentRange.getEnd()) {
decrypted.position((int) (positionFile - currentOffset));
position = desired;
} else {
for (DiskRangeList curRange = bytes; curRange != null;
curRange = curRange.next) {
if (curRange.getOffset() <= positionFile &&
(curRange.next == null ? positionFile <= curRange.getEnd() :
positionFile < curRange.getEnd())) {
position = desired;
setCurrent(curRange, true);
return;
}
}
throw new IllegalArgumentException("Seek in " + name + " to " +
desired + " is outside of the data");
}
}
@Override
public String toString() {
return "uncompressed stream " + name + " position: " + position +
" length: " + length + " range: " + getRangeNumber(bytes, currentRange) +
" offset: " + currentRange.getOffset() +
" position: " + (decrypted == null ? 0 : decrypted.position()) +
" limit: " + (decrypted == null ? 0 : decrypted.limit());
}
}
private static ByteBuffer allocateBuffer(int size, boolean isDirect) {
// TODO: use the same pool as the ORC readers
if (isDirect) {
return ByteBuffer.allocateDirect(size);
} else {
return ByteBuffer.allocate(size);
}
}
/**
* Manage the state of the decryption, including the ability to seek.
*/
static class EncryptionState {
private final Object name;
private final Key key;
private final byte[] iv;
private final Cipher cipher;
private final long offset;
private ByteBuffer decrypted;
EncryptionState(Object name, long offset, StreamOptions options) {
this.name = name;
this.offset = offset;
EncryptionAlgorithm algorithm = options.getAlgorithm();
key = options.getKey();
iv = options.getIv();
cipher = algorithm.createCipher();
}
void changeIv(Consumer<byte[]> modifier) {
modifier.accept(iv);
updateIv();
OutStream.logKeyAndIv(name, key, iv);
}
private void updateIv() {
try {
cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
} catch (InvalidKeyException e) {
throw new IllegalArgumentException("Invalid key on " + name, e);
} catch (InvalidAlgorithmParameterException e) {
throw new IllegalArgumentException("Invalid iv on " + name, e);
}
}
/**
* We are seeking to a new range, so update the cipher to change the IV
* to match. This code assumes that we only support encryption in CTR mode.
* @param offset where we are seeking to in the stream
*/
void changeIv(long offset) {
int blockSize = cipher.getBlockSize();
long encryptionBlocks = offset / blockSize;
long extra = offset % blockSize;
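      // Illustrative example (block size assumed): with a 16-byte cipher block
      // and offset = 40, encryptionBlocks = 2 and extra = 8, so the counter is
      // advanced by 2 blocks and the first 8 bytes of the next block are
      // decrypted into a scratch buffer and discarded below.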
CryptoUtils.clearCounter(iv);
if (encryptionBlocks != 0) {
// Add the encryption blocks into the initial iv, to compensate for
// skipping over decrypting those bytes.
int posn = iv.length - 1;
while (encryptionBlocks > 0) {
long sum = (iv[posn] & 0xff) + encryptionBlocks;
iv[posn--] = (byte) sum;
encryptionBlocks = sum / 0x100;
}
}
updateIv();
// If the range starts at an offset that doesn't match the encryption
// block, we need to advance some bytes within an encryption block.
if (extra > 0) {
try {
byte[] wasted = new byte[(int) extra];
cipher.update(wasted, 0, wasted.length, wasted, 0);
} catch (ShortBufferException e) {
throw new IllegalArgumentException("Short buffer in " + name, e);
}
}
}
/**
* Decrypt the given range into the decrypted buffer. It is assumed that
* the cipher is correctly initialized by changeIv before this is called.
* @param encrypted the bytes to decrypt
* @return a reused ByteBuffer, which is used by each call to decrypt
*/
ByteBuffer decrypt(ByteBuffer encrypted) {
int length = encrypted.remaining();
if (decrypted == null || decrypted.capacity() < length) {
decrypted = ByteBuffer.allocate(length);
} else {
decrypted.clear();
}
try {
int output = cipher.update(encrypted, decrypted);
if (output != length) {
throw new IllegalArgumentException("Problem decrypting " + name +
" at " + offset);
}
} catch (ShortBufferException e) {
throw new IllegalArgumentException("Problem decrypting " + name +
" at " + offset, e);
}
decrypted.flip();
return decrypted;
}
void close() {
decrypted = null;
}
}
/**
* Implements a stream over an encrypted, but uncompressed stream.
*/
public static class EncryptedStream extends UncompressedStream {
private final EncryptionState encrypt;
public EncryptedStream(Object name, DiskRangeList input, long offset, long length,
StreamOptions options) {
super(name, offset, length);
encrypt = new EncryptionState(name, offset, options);
reset(input);
}
@Override
protected void setCurrent(DiskRangeList newRange, boolean isJump) {
currentRange = newRange;
if (newRange != null) {
// what is the position of the start of the newRange?
currentOffset = newRange.getOffset();
ByteBuffer encrypted = newRange.getData().slice();
if (currentOffset < offset) {
int ignoreBytes = (int) (offset - currentOffset);
encrypted.position(ignoreBytes);
currentOffset = offset;
}
if (isJump) {
encrypt.changeIv(currentOffset - offset);
}
if (encrypted.remaining() > length + offset - currentOffset) {
encrypted.limit((int) (length + offset - currentOffset));
}
decrypted = encrypt.decrypt(encrypted);
decrypted.position((int) (position + offset - currentOffset));
}
}
@Override
public void close() {
super.close();
encrypt.close();
}
@Override
public void changeIv(Consumer<byte[]> modifier) {
encrypt.changeIv(modifier);
}
@Override
public String toString() {
return "encrypted " + super.toString();
}
}
public static class CompressedStream extends InStream {
private final int bufferSize;
private ByteBuffer uncompressed;
private final CompressionCodec codec;
protected ByteBuffer compressed;
protected DiskRangeList currentRange;
private boolean isUncompressedOriginal;
protected long currentCompressedStart = -1;
/**
* Create the stream without resetting the input stream.
* This is used in subclasses so they can finish initializing before
* reset is called.
* @param name the name of the stream
* @param length the total number of bytes in the stream
* @param options the options used to read the stream
*/
public CompressedStream(Object name,
long offset,
long length,
StreamOptions options) {
super(name, offset, length);
this.codec = options.codec;
this.bufferSize = options.bufferSize;
}
/**
* Create the stream and initialize the input for the stream.
* @param name the name of the stream
* @param input the input data
* @param length the total length of the stream
* @param options the options to read the data with
*/
public CompressedStream(Object name,
DiskRangeList input,
long offset,
long length,
StreamOptions options) {
super(name, offset, length);
this.codec = options.codec;
this.bufferSize = options.bufferSize;
reset(input);
}
private void allocateForUncompressed(int size, boolean isDirect) {
uncompressed = allocateBuffer(size, isDirect);
}
@Override
protected void setCurrent(DiskRangeList newRange,
boolean isJump) {
currentRange = newRange;
if (newRange != null) {
compressed = newRange.getData().slice();
int pos = (int) (position + offset - newRange.getOffset());
compressed.position(pos);
compressed.limit(pos + (int) Math.min(compressed.remaining(),
length - position));
}
}
private int readHeaderByte() {
while (currentRange != null &&
(compressed == null || compressed.remaining() <= 0)) {
setCurrent(currentRange.next, false);
}
if (compressed != null && compressed.remaining() > 0) {
position += 1;
return compressed.get() & 0xff;
} else {
throw new IllegalStateException("Can't read header at " + this);
}
}
private void readHeader() throws IOException {
currentCompressedStart = this.position;
int b0 = readHeaderByte();
int b1 = readHeaderByte();
int b2 = readHeaderByte();
boolean isOriginal = (b0 & 0x01) == 1;
int chunkLength = (b2 << 15) | (b1 << 7) | (b0 >> 1);
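      // Worked example (header bytes assumed): bytes 0x40 0x0D 0x03 give
      // isOriginal = false and chunkLength = (0x03 << 15) | (0x0D << 7) |
      // (0x40 >> 1) = 98304 + 1664 + 32 = 100000.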
if (chunkLength > bufferSize) {
throw new IllegalArgumentException("Buffer size too small. size = " +
bufferSize + " needed = " + chunkLength + " in " + name);
}
ByteBuffer slice = this.slice(chunkLength);
if (isOriginal) {
uncompressed = slice;
isUncompressedOriginal = true;
} else {
if (isUncompressedOriginal) {
// Since the previous chunk was uncompressed, allocate the buffer and set original false
allocateForUncompressed(bufferSize, slice.isDirect());
isUncompressedOriginal = false;
} else if (uncompressed == null) {
// If the buffer was not allocated then allocate the same
allocateForUncompressed(bufferSize, slice.isDirect());
} else {
// Since the buffer is already allocated just clear the same
uncompressed.clear();
}
codec.decompress(slice, uncompressed);
}
}
@Override
public int read() throws IOException {
if (!ensureUncompressed()) {
return -1;
}
return 0xff & uncompressed.get();
}
@Override
public int read(byte[] data, int offset, int length) throws IOException {
if (!ensureUncompressed()) {
return -1;
}
int actualLength = Math.min(length, uncompressed.remaining());
uncompressed.get(data, offset, actualLength);
return actualLength;
}
private boolean ensureUncompressed() throws IOException {
while (uncompressed == null || uncompressed.remaining() == 0) {
if (position == this.length) {
return false;
}
readHeader();
}
return true;
}
@Override
public int available() throws IOException {
if (!ensureUncompressed()) {
return 0;
}
return uncompressed.remaining();
}
@Override
public void close() {
uncompressed = null;
compressed = null;
currentRange = null;
position = length;
bytes = null;
}
@Override
public void changeIv(Consumer<byte[]> modifier) {
// nothing to do
}
@Override
public void seek(PositionProvider index) throws IOException {
boolean seeked = seek(index.getNext());
long uncompressedBytes = index.getNext();
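      // The two getNext() calls above read the two parts of a position for a
      // compressed stream: the offset of a compression chunk header within the
      // stream, then the number of uncompressed bytes to skip once that chunk
      // has been decompressed.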
if (!seeked) {
if (uncompressed != null) {
// Only reposition uncompressed
uncompressed.position((int) uncompressedBytes);
}
// uncompressed == null should not happen as !seeked would mean that a previous
// readHeader has taken place
} else {
if (uncompressedBytes != 0) {
// Decompress compressed as a seek has taken place and position uncompressed
readHeader();
uncompressed.position(uncompressed.position() +
(int) uncompressedBytes);
} else if (uncompressed != null) {
// mark the uncompressed buffer as done
uncompressed.position(uncompressed.limit());
}
}
}
    /* Slices a read-only contiguous buffer of chunkLength bytes. */
private ByteBuffer slice(int chunkLength) throws IOException {
int len = chunkLength;
final DiskRangeList oldRange = currentRange;
final long oldPosition = position;
ByteBuffer slice;
if (compressed.remaining() >= len) {
slice = compressed.slice();
// simple case
slice.limit(len);
position += len;
compressed.position(compressed.position() + len);
return slice;
} else if (currentRange.next == null) {
// nothing has been modified yet
throw new IOException("EOF in " + this + " while trying to read " +
chunkLength + " bytes");
}
if (LOG.isDebugEnabled()) {
LOG.debug(String.format(
"Crossing into next BufferChunk because compressed only has %d bytes (needs %d)",
compressed.remaining(), len));
}
// we need to consolidate 2 or more buffers into 1
// first copy out compressed buffers
ByteBuffer copy = allocateBuffer(chunkLength, compressed.isDirect());
position += compressed.remaining();
len -= compressed.remaining();
copy.put(compressed);
while (currentRange.next != null) {
setCurrent(currentRange.next, false);
LOG.debug("Read slow-path, >1 cross block reads with {}", this);
if (compressed.remaining() >= len) {
slice = compressed.slice();
slice.limit(len);
copy.put(slice);
position += len;
compressed.position(compressed.position() + len);
copy.flip();
return copy;
}
position += compressed.remaining();
len -= compressed.remaining();
copy.put(compressed);
}
// restore offsets for exception clarity
position = oldPosition;
setCurrent(oldRange, true);
throw new IOException("EOF in " + this + " while trying to read " +
chunkLength + " bytes");
}
/**
* Seek to the desired chunk based on the input position.
*
* @param desired position in the compressed stream
* @return Indicates whether a seek was performed or not
* @throws IOException when seeking outside the stream bounds
*/
boolean seek(long desired) throws IOException {
if (desired == 0 && bytes == null) {
return false;
}
if (desired == currentCompressedStart) {
// Header already at the required position
return false;
}
long posn = desired + offset;
for (DiskRangeList range = bytes; range != null; range = range.next) {
if (range.getOffset() <= posn &&
(range.next == null ? posn <= range.getEnd() :
posn < range.getEnd())) {
position = desired;
setCurrent(range, true);
return true;
}
}
throw new IOException("Seek outside of data in " + this + " to " + desired);
}
private String rangeString() {
StringBuilder builder = new StringBuilder();
int i = 0;
for (DiskRangeList range = bytes; range != null; range = range.next){
if (i != 0) {
builder.append("; ");
}
builder.append(" range ");
builder.append(i);
builder.append(" = ");
builder.append(range.getOffset());
builder.append(" to ");
builder.append(range.getEnd());
++i;
}
return builder.toString();
}
@Override
public String toString() {
return "compressed stream " + name + " position: " + position +
" length: " + length + " range: " + getRangeNumber(bytes, currentRange) +
" offset: " + (compressed == null ? 0 : compressed.position()) +
" limit: " + (compressed == null ? 0 : compressed.limit()) +
rangeString() +
(uncompressed == null ? "" :
" uncompressed: " + uncompressed.position() + " to " +
uncompressed.limit());
}
}
private static class EncryptedCompressedStream extends CompressedStream {
private final EncryptionState encrypt;
EncryptedCompressedStream(Object name,
DiskRangeList input,
long offset,
long length,
StreamOptions options) {
super(name, offset, length, options);
encrypt = new EncryptionState(name, offset, options);
reset(input);
}
@Override
protected void setCurrent(DiskRangeList newRange, boolean isJump) {
currentRange = newRange;
if (newRange != null) {
// what is the position of the start of the newRange?
long rangeOffset = newRange.getOffset();
int ignoreBytes = 0;
ByteBuffer encrypted = newRange.getData().slice();
if (rangeOffset < offset) {
ignoreBytes = (int) (offset - rangeOffset);
encrypted.position(ignoreBytes);
}
if (isJump) {
encrypt.changeIv(ignoreBytes + rangeOffset - offset);
}
encrypted.limit(ignoreBytes +
(int) Math.min(encrypted.remaining(), length));
compressed = encrypt.decrypt(encrypted);
if (position + offset > rangeOffset + ignoreBytes) {
compressed.position((int) (position + offset - rangeOffset - ignoreBytes));
}
}
}
@Override
public void close() {
super.close();
encrypt.close();
}
@Override
public void changeIv(Consumer<byte[]> modifier) {
encrypt.changeIv(modifier);
}
@Override
public String toString() {
return "encrypted " + super.toString();
}
}
public abstract void seek(PositionProvider index) throws IOException;
public static class StreamOptions implements Cloneable {
private CompressionCodec codec;
private int bufferSize;
private EncryptionAlgorithm algorithm;
private Key key;
private byte[] iv;
public StreamOptions(StreamOptions other) {
codec = other.codec;
bufferSize = other.bufferSize;
algorithm = other.algorithm;
key = other.key;
iv = other.iv == null ? null : other.iv.clone();
}
public StreamOptions() {
}
public StreamOptions withCodec(CompressionCodec value) {
this.codec = value;
return this;
}
public StreamOptions withBufferSize(int value) {
bufferSize = value;
return this;
}
public StreamOptions withEncryption(EncryptionAlgorithm algorithm,
Key key,
byte[] iv) {
this.algorithm = algorithm;
this.key = key;
this.iv = iv;
return this;
}
public boolean isCompressed() {
return codec != null;
}
public CompressionCodec getCodec() {
return codec;
}
public int getBufferSize() {
return bufferSize;
}
public EncryptionAlgorithm getAlgorithm() {
return algorithm;
}
public Key getKey() {
return key;
}
public byte[] getIv() {
return iv;
}
@Override
public StreamOptions clone() {
try {
StreamOptions clone = (StreamOptions) super.clone();
if (clone.codec != null) {
// Make sure we don't share the same codec between two readers.
clone.codec = OrcCodecPool.getCodec(codec.getKind());
}
return clone;
} catch (CloneNotSupportedException e) {
throw new UnsupportedOperationException("uncloneable", e);
}
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("compress: ");
buffer.append(codec == null ? "none" : codec.getKind());
buffer.append(", buffer size: ");
buffer.append(bufferSize);
if (key != null) {
buffer.append(", encryption: ");
buffer.append(algorithm);
}
return buffer.toString();
}
}
public static StreamOptions options() {
return new StreamOptions();
}
/**
* Create an input stream from a list of disk ranges with data.
* @param name the name of the stream
* @param input the list of ranges of bytes for the stream; from disk or cache
* @param offset the first byte offset of the stream
* @param length the length in bytes of the stream
* @param options the options to read with
* @return an input stream
*/
public static InStream create(Object name,
DiskRangeList input,
long offset,
long length,
StreamOptions options) {
LOG.debug("Reading {} with {} from {} for {}", name, options, offset,
length);
if (options == null || options.codec == null) {
if (options == null || options.key == null) {
return new UncompressedStream(name, input, offset, length);
} else {
OutStream.logKeyAndIv(name, options.getKey(), options.getIv());
return new EncryptedStream(name, input, offset, length, options);
}
} else if (options.key == null) {
return new CompressedStream(name, input, offset, length, options);
} else {
OutStream.logKeyAndIv(name, options.getKey(), options.getIv());
return new EncryptedCompressedStream(name, input, offset, length, options);
}
}
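  // Hypothetical usage sketch (names and sizes assumed): build a compressed
  // stream over previously read ranges.
  //   InStream.StreamOptions opts = InStream.options()
  //       .withCodec(OrcCodecPool.getCodec(CompressionKind.ZLIB))
  //       .withBufferSize(256 * 1024);
  //   InStream data = InStream.create("stream", ranges, streamOffset, streamLength, opts);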
/**
* Create an input stream from a list of disk ranges with data.
* @param name the name of the stream
* @param input the list of ranges of bytes for the stream; from disk or cache
* @param length the length in bytes of the stream
* @return an input stream
*/
public static InStream create(Object name,
DiskRangeList input,
long offset,
long length) {
return create(name, input, offset, length, null);
}
/**
* Creates coded input stream (used for protobuf message parsing) with higher
* message size limit.
*
* @param inStream the stream to wrap.
* @return coded input stream
*/
public static CodedInputStream createCodedInputStream(InStream inStream) {
CodedInputStream codedInputStream = CodedInputStream.newInstance(inStream);
codedInputStream.setSizeLimit(PROTOBUF_MESSAGE_MAX_LIMIT);
return codedInputStream;
}
}
| 30,318 | 31.883948 | 116 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/IntegerReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import java.io.IOException;
/**
* Interface for reading integers.
*/
public interface IntegerReader {
/**
* Seek to the position provided by index.
   * @param index the provider of the position to seek to
* @throws IOException
*/
void seek(PositionProvider index) throws IOException;
/**
   * Skip the specified number of rows.
   * @param numValues the number of values to skip
* @throws IOException
*/
void skip(long numValues) throws IOException;
/**
* Check if there are any more values left.
   * @return true if there are more values to read
* @throws IOException
*/
boolean hasNext() throws IOException;
/**
* Return the next available value.
   * @return the next available value
* @throws IOException
*/
long next() throws IOException;
/**
* Return the next available vector for values.
* @param column the column being read
* @param data the vector to read into
* @param length the number of numbers to read
* @throws IOException
*/
void nextVector(ColumnVector column,
long[] data,
int length
) throws IOException;
/**
* Return the next available vector for values. Does not change the
* value of column.isRepeating.
* @param column the column being read
* @param data the vector to read into
* @param length the number of numbers to read
* @throws IOException
*/
void nextVector(ColumnVector column,
int[] data,
int length
) throws IOException;
}
| 2,334 | 27.13253 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/IntegerWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.IOException;
import java.util.function.Consumer;
/**
* Interface for writing integers.
*/
public interface IntegerWriter {
/**
   * Record the current position of the stream.
   * @param recorder the recorder that receives the position
* @throws IOException
*/
void getPosition(PositionRecorder recorder) throws IOException;
/**
   * Write the integer value.
   * @param value the value to write
* @throws IOException
*/
void write(long value) throws IOException;
/**
* Flush the buffer
* @throws IOException
*/
void flush() throws IOException;
/**
* Estimate the amount of memory being used.
* @return number of bytes
*/
long estimateMemory();
void changeIv(Consumer<byte[]> modifier);
}
| 1,524 | 25.754386 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/MaskDescriptionImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.DataMask;
import org.apache.orc.DataMaskDescription;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
public class MaskDescriptionImpl implements DataMaskDescription,
Comparable<MaskDescriptionImpl> {
private int id;
private final String name;
private final String[] parameters;
private final List<TypeDescription> columns = new ArrayList<>();
public MaskDescriptionImpl(String name,
String... parameters) {
this.name = name;
this.parameters = parameters == null ? new String[0] : parameters;
}
public MaskDescriptionImpl(int id,
OrcProto.DataMask mask) {
this.id = id;
this.name = mask.getName();
this.parameters = new String[mask.getMaskParametersCount()];
for(int p=0; p < parameters.length; ++p) {
parameters[p] = mask.getMaskParameters(p);
}
}
@Override
public boolean equals(Object other) {
if (other == null || other.getClass() != getClass()) {
return false;
} else {
return compareTo((MaskDescriptionImpl) other) == 0;
}
}
public void addColumn(TypeDescription column) {
columns.add(column);
}
public void setId(int id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public String[] getParameters() {
return parameters;
}
@Override
public TypeDescription[] getColumns() {
TypeDescription[] result = columns.toArray(new TypeDescription[0]);
// sort the columns by their ids
Arrays.sort(result, Comparator.comparingInt(TypeDescription::getId));
return result;
}
public int getId() {
return id;
}
public DataMask create(TypeDescription schema,
DataMask.MaskOverrides overrides) {
return DataMask.Factory.build(this, schema, overrides);
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("mask ");
buffer.append(getName());
buffer.append('(');
String[] parameters = getParameters();
if (parameters != null) {
for(int p=0; p < parameters.length; ++p) {
if (p != 0) {
buffer.append(", ");
}
buffer.append(parameters[p]);
}
}
buffer.append(')');
return buffer.toString();
}
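  // Illustrative example (mask name and parameter assumed):
  // new MaskDescriptionImpl("redact", "Xx7").toString() returns "mask redact(Xx7)".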
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + Arrays.hashCode(parameters);
return result;
}
@Override
public int compareTo(@NotNull MaskDescriptionImpl other) {
if (other == this) {
return 0;
}
int result = name.compareTo(other.name);
int p = 0;
while (result == 0 &&
p < parameters.length && p < other.parameters.length) {
result = parameters[p].compareTo(other.parameters[p]);
p += 1;
}
if (result == 0) {
result = Integer.compare(parameters.length, other.parameters.length);
}
return result;
}
}
| 4,091 | 26.836735 | 77 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/MemoryManager.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
/**
* Shim for backwards compatibility with Hive
*/
@Deprecated
public class MemoryManager extends MemoryManagerImpl {
public MemoryManager(Configuration conf) {
super(conf);
}
}
| 1,073 | 31.545455 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/MemoryManagerImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.orc.MemoryManager;
import org.apache.orc.OrcConf;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
/**
* Implements a memory manager that keeps a global context of how many ORC
* writers there are and manages the memory between them. For use cases with
* dynamic partitions, it is easy to end up with many writers in the same task.
* By managing the size of each allocation, we try to cut down the size of each
* allocation and keep the task from running out of memory.
* <p>
* This class is not thread safe, but is re-entrant - ensure creation and all
* invocations are triggered from the same thread.
*/
public class MemoryManagerImpl implements MemoryManager {
private final long totalMemoryPool;
private final Map<Path, WriterInfo> writerList = new HashMap<>();
private final AtomicLong totalAllocation = new AtomicLong(0);
private static class WriterInfo {
long allocation;
WriterInfo(long allocation) {
this.allocation = allocation;
}
}
/**
* Create the memory manager.
* @param conf use the configuration to find the maximum size of the memory
* pool.
*/
public MemoryManagerImpl(Configuration conf) {
this(Math.round(ManagementFactory.getMemoryMXBean().
getHeapMemoryUsage().getMax() * OrcConf.MEMORY_POOL.getDouble(conf)));
}
/**
* Create the memory manager
* @param poolSize the size of memory to use
*/
public MemoryManagerImpl(long poolSize) {
totalMemoryPool = poolSize;
}
/**
* Add a new writer's memory allocation to the pool. We use the path
* as a unique key to ensure that we don't get duplicates.
* @param path the file that is being written
* @param requestedAllocation the requested buffer size
*/
@Override
public synchronized void addWriter(Path path, long requestedAllocation,
Callback callback) throws IOException {
WriterInfo oldVal = writerList.get(path);
    // this should always be null, but we handle the case where the memory
    // manager wasn't told that a writer is no longer in use and the task
    // starts writing to the same path.
if (oldVal == null) {
oldVal = new WriterInfo(requestedAllocation);
writerList.put(path, oldVal);
totalAllocation.addAndGet(requestedAllocation);
} else {
// handle a new writer that is writing to the same path
totalAllocation.addAndGet(requestedAllocation - oldVal.allocation);
oldVal.allocation = requestedAllocation;
}
}
/**
* Remove the given writer from the pool.
* @param path the file that has been closed
*/
@Override
public synchronized void removeWriter(Path path) throws IOException {
WriterInfo val = writerList.remove(path);
if (val != null) {
totalAllocation.addAndGet(-val.allocation);
}
}
/**
* Get the total pool size that is available for ORC writers.
* @return the number of bytes in the pool
*/
public long getTotalMemoryPool() {
return totalMemoryPool;
}
/**
* The scaling factor for each allocation to ensure that the pool isn't
* oversubscribed.
* @return a fraction between 0.0 and 1.0 of the requested size that is
* available for each writer.
*/
public double getAllocationScale() {
long alloc = totalAllocation.get();
return alloc <= totalMemoryPool ? 1.0 : (double) totalMemoryPool / alloc;
}
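  // Illustrative example (sizes assumed): with a 1 GB pool and 4 GB of total
  // requested allocations, each writer is scaled to 0.25 of its request.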
@Override
public void addedRow(int rows) throws IOException {
// PASS
}
/**
* Obsolete method left for Hive, which extends this class.
* @deprecated remove this method
*/
public void notifyWriters() throws IOException {
// PASS
}
@Override
public long checkMemory(long previous, Callback writer) throws IOException {
long current = totalAllocation.get();
if (current != previous) {
writer.checkMemory(getAllocationScale());
}
return current;
}
}
| 4,950 | 32.006667 | 79 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/OrcAcidUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.Reader;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.StandardCharsets;
public class OrcAcidUtils {
public static final String ACID_STATS = "hive.acid.stats";
public static final String DELTA_SIDE_FILE_SUFFIX = "_flush_length";
/**
* Get the filename of the ORC ACID side file that contains the lengths
* of the intermediate footers.
* @param main the main ORC filename
* @return the name of the side file
*/
public static Path getSideFile(Path main) {
return new Path(main + DELTA_SIDE_FILE_SUFFIX);
}
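  // Illustrative example (file name assumed): for a delta file named
  // "bucket_00000", the side file is "bucket_00000_flush_length".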
/**
* Read the side file to get the last flush length.
* @param fs the file system to use
* @param deltaFile the path of the delta file
* @return the maximum size of the file to use
* @throws IOException
*/
public static long getLastFlushLength(FileSystem fs,
Path deltaFile) throws IOException {
Path lengths = getSideFile(deltaFile);
long result = Long.MAX_VALUE;
if(!fs.exists(lengths)) {
return result;
}
try (FSDataInputStream stream = fs.open(lengths)) {
result = -1;
while (stream.available() > 0) {
result = stream.readLong();
}
return result;
} catch (IOException ioe) {
return result;
}
}
private static final Charset utf8 = StandardCharsets.UTF_8;
private static final CharsetDecoder utf8Decoder = utf8.newDecoder();
public static AcidStats parseAcidStats(Reader reader) {
if (reader.hasMetadataValue(ACID_STATS)) {
try {
ByteBuffer val = reader.getMetadataValue(ACID_STATS).duplicate();
return new AcidStats(utf8Decoder.decode(val).toString());
} catch (CharacterCodingException e) {
throw new IllegalArgumentException("Bad string encoding for " +
ACID_STATS, e);
}
} else {
return null;
}
}
}
| 3,004 | 32.388889 | 76 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/OrcCodecPool.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
/**
* A clone of Hadoop codec pool for ORC; cause it has its own codecs...
*/
public final class OrcCodecPool {
private static final Logger LOG = LoggerFactory.getLogger(OrcCodecPool.class);
/**
* A global decompressor pool used to save the expensive
* construction/destruction of (possibly native) decompression codecs.
*/
private static final ConcurrentHashMap<CompressionKind, List<CompressionCodec>> POOL =
new ConcurrentHashMap<>();
private static final int MAX_PER_KIND = 32;
public static CompressionCodec getCodec(CompressionKind kind) {
if (kind == CompressionKind.NONE) return null;
CompressionCodec codec = null;
List<CompressionCodec> codecList = POOL.get(kind);
if (codecList != null) {
synchronized (codecList) {
if (!codecList.isEmpty()) {
codec = codecList.remove(codecList.size() - 1);
}
}
}
if (codec == null) {
codec = WriterImpl.createCodec(kind);
LOG.debug("Got brand-new codec {}", kind);
} else {
LOG.debug("Got recycled codec");
}
return codec;
}
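  // Hypothetical usage sketch: borrow a codec and always return it to the pool.
  //   CompressionCodec codec = OrcCodecPool.getCodec(CompressionKind.ZLIB);
  //   try {
  //     // ... use the codec ...
  //   } finally {
  //     OrcCodecPool.returnCodec(CompressionKind.ZLIB, codec);
  //   }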
/**
* Returns the codec to the pool or closes it, suppressing exceptions.
* @param kind Compression kind.
* @param codec Codec.
*/
public static void returnCodec(CompressionKind kind, CompressionCodec codec) {
if (codec == null) {
return;
}
assert kind != CompressionKind.NONE;
try {
codec.reset();
List<CompressionCodec> list = POOL.get(kind);
if (list == null) {
List<CompressionCodec> newList = new ArrayList<>();
List<CompressionCodec> oldList = POOL.putIfAbsent(kind, newList);
list = (oldList == null) ? newList : oldList;
}
synchronized (list) {
if (list.size() < MAX_PER_KIND) {
list.add(codec);
return;
}
}
// We didn't add the codec to the list.
codec.destroy();
} catch (Exception ex) {
LOG.error("Ignoring codec cleanup error", ex);
}
}
public static int getPoolSize(CompressionKind kind) {
if (kind == CompressionKind.NONE) return 0;
List<CompressionCodec> codecList = POOL.get(kind);
if (codecList == null) return 0;
synchronized (codecList) {
return codecList.size();
}
}
/**
* Clear the codec pool. Mostly used for testing.
*/
public static void clear() {
POOL.clear();
}
private OrcCodecPool() {
// prevent instantiation
}
}
| 3,529 | 29.695652 | 88 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/OrcFilterContextImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.TypeDescription;
import org.jetbrains.annotations.NotNull;
import java.util.HashMap;
import java.util.Map;
/**
* This defines the input for any filter operation. This is an extension of
 * {@link VectorizedRowBatch} with schema.
* <p>
* This offers a convenience method of finding the column vector from a given column name
* that the filters can invoke to get access to the column vector.
*/
public class OrcFilterContextImpl implements OrcFilterContext {
private VectorizedRowBatch batch = null;
  // Cache of field to ColumnVector; this is reset every time the batch reference changes
private final Map<String, ColumnVector[]> vectors;
private final TypeDescription readSchema;
private final boolean isSchemaCaseAware;
public OrcFilterContextImpl(TypeDescription readSchema, boolean isSchemaCaseAware) {
this.readSchema = readSchema;
this.isSchemaCaseAware = isSchemaCaseAware;
this.vectors = new HashMap<>();
}
public OrcFilterContext setBatch(@NotNull VectorizedRowBatch batch) {
if (batch != this.batch) {
this.batch = batch;
vectors.clear();
}
return this;
}
/**
* For testing only
* @return The batch reference against which the cache is maintained
*/
VectorizedRowBatch getBatch() {
return batch;
}
@Override
public void setFilterContext(boolean selectedInUse, int[] selected, int selectedSize) {
batch.setFilterContext(selectedInUse, selected, selectedSize);
}
@Override
public boolean validateSelected() {
return batch.validateSelected();
}
@Override
public int[] updateSelected(int i) {
return batch.updateSelected(i);
}
@Override
public void setSelectedInUse(boolean b) {
batch.setSelectedInUse(b);
}
@Override
public void setSelected(int[] ints) {
batch.setSelected(ints);
}
@Override
public void setSelectedSize(int i) {
batch.setSelectedSize(i);
}
@Override
public void reset() {
batch.reset();
}
@Override
public boolean isSelectedInUse() {
return batch.isSelectedInUse();
}
@Override
public int[] getSelected() {
return batch.getSelected();
}
@Override
public int getSelectedSize() {
return batch.getSelectedSize();
}
// For testing only
public ColumnVector[] getCols() {
return batch.cols;
}
@Override
public ColumnVector[] findColumnVector(String name) {
return vectors.computeIfAbsent(name,
key -> ParserUtils.findColumnVectors(readSchema,
new ParserUtils.StringPosition(key),
isSchemaCaseAware, batch));
}
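  // Hypothetical usage sketch (column name assumed): bind a batch, then look
  // up the vectors for a possibly nested column inside a filter.
  //   OrcFilterContextImpl ctx = new OrcFilterContextImpl(readSchema, true);
  //   ctx.setBatch(batch);
  //   ColumnVector[] path = ctx.findColumnVector("address.zip");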
}
| 3,667 | 27.434109 | 89 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/OrcIndex.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.OrcProto;
public final class OrcIndex {
OrcProto.RowIndex[] rowGroupIndex;
OrcProto.Stream.Kind[] bloomFilterKinds;
OrcProto.BloomFilterIndex[] bloomFilterIndex;
public OrcIndex(OrcProto.RowIndex[] rgIndex,
OrcProto.Stream.Kind[] bloomFilterKinds,
OrcProto.BloomFilterIndex[] bfIndex) {
this.rowGroupIndex = rgIndex;
this.bloomFilterKinds = bloomFilterKinds;
this.bloomFilterIndex = bfIndex;
}
public OrcProto.RowIndex[] getRowGroupIndex() {
return rowGroupIndex;
}
public OrcProto.BloomFilterIndex[] getBloomFilterIndex() {
return bloomFilterIndex;
}
public OrcProto.Stream.Kind[] getBloomFilterKinds() {
return bloomFilterKinds;
}
public void setRowGroupIndex(OrcProto.RowIndex[] rowGroupIndex) {
this.rowGroupIndex = rowGroupIndex;
}
}
| 1,694 | 31.596154 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/OrcTail.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.Reader;
import org.apache.orc.StripeInformation;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
// TODO: Make OrcTail implement FileMetadata or Reader interface
public final class OrcTail {
private static final Logger LOG = LoggerFactory.getLogger(OrcTail.class);
// postscript + footer - Serialized in OrcSplit
private final OrcProto.FileTail fileTail;
// serialized representation of metadata, footer and postscript
private final BufferChunk serializedTail;
private final TypeDescription schema;
// used to invalidate cache entries
private final long fileModificationTime;
private final Reader reader;
public OrcTail(OrcProto.FileTail fileTail,
ByteBuffer serializedTail) throws IOException {
this(fileTail, serializedTail, -1);
}
public OrcTail(OrcProto.FileTail fileTail, ByteBuffer serializedTail,
long fileModificationTime) throws IOException {
this(fileTail,
new BufferChunk(serializedTail, getStripeStatisticsOffset(fileTail)),
fileModificationTime);
}
public OrcTail(OrcProto.FileTail fileTail, BufferChunk serializedTail,
long fileModificationTime) throws IOException {
this(fileTail, serializedTail, fileModificationTime, null);
}
public OrcTail(OrcProto.FileTail fileTail, BufferChunk serializedTail,
long fileModificationTime, Reader reader) throws IOException {
this.fileTail = fileTail;
this.serializedTail = serializedTail;
this.fileModificationTime = fileModificationTime;
List<OrcProto.Type> types = getTypes();
OrcUtils.isValidTypeTree(types, 0);
this.schema = OrcUtils.convertTypeFromProtobuf(types, 0);
this.reader = reader;
}
public ByteBuffer getSerializedTail() {
if (serializedTail.next == null) {
return serializedTail.getData();
} else {
// make a single buffer...
int len = 0;
for(BufferChunk chunk=serializedTail;
chunk != null;
chunk = (BufferChunk) chunk.next) {
len += chunk.getLength();
}
ByteBuffer result = ByteBuffer.allocate(len);
for(BufferChunk chunk=serializedTail;
chunk != null;
chunk = (BufferChunk) chunk.next) {
ByteBuffer tmp = chunk.getData();
result.put(tmp.array(), tmp.arrayOffset() + tmp.position(),
tmp.remaining());
}
result.flip();
return result;
}
}
/**
* Gets the buffer chunks that correspond to the stripe statistics,
* file tail, and post script.
* @return the shared buffers with the contents of the file tail
*/
public BufferChunk getTailBuffer() {
return serializedTail;
}
public long getFileModificationTime() {
return fileModificationTime;
}
public OrcProto.Footer getFooter() {
return fileTail.getFooter();
}
public OrcProto.PostScript getPostScript() {
return fileTail.getPostscript();
}
public OrcFile.WriterVersion getWriterVersion() {
OrcProto.PostScript ps = fileTail.getPostscript();
OrcProto.Footer footer = fileTail.getFooter();
OrcFile.WriterImplementation writer =
OrcFile.WriterImplementation.from(footer.getWriter());
return OrcFile.WriterVersion.from(writer, ps.getWriterVersion());
}
public List<StripeInformation> getStripes() {
return OrcUtils.convertProtoStripesToStripes(getFooter().getStripesList());
}
public CompressionKind getCompressionKind() {
return CompressionKind.valueOf(fileTail.getPostscript().getCompression().name());
}
public int getCompressionBufferSize() {
OrcProto.PostScript postScript = fileTail.getPostscript();
return ReaderImpl.getCompressionBlockSize(postScript);
}
public int getMetadataSize() {
return (int) getPostScript().getMetadataLength();
}
public List<OrcProto.Type> getTypes() {
return getFooter().getTypesList();
}
public TypeDescription getSchema() {
return schema;
}
public OrcProto.FileTail getFileTail() {
return fileTail;
}
static long getMetadataOffset(OrcProto.FileTail tail) {
OrcProto.PostScript ps = tail.getPostscript();
return tail.getFileLength()
- 1
- tail.getPostscriptLength()
- ps.getFooterLength()
- ps.getMetadataLength();
}
static long getStripeStatisticsOffset(OrcProto.FileTail tail) {
OrcProto.PostScript ps = tail.getPostscript();
return getMetadataOffset(tail) - ps.getStripeStatisticsLength();
}
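  // Illustrative sketch of the layout the two offset helpers above assume, with made-up
  // sizes: for a 10,000 byte file with postscriptLength=20, footerLength=300 and
  // metadataLength=80, the metadata starts at 10000 - 1 - 20 - 300 - 80 = 9599, i.e. the
  // tail is laid out as
  // ... | stripe statistics | metadata | footer | postscript | 1-byte postscript length.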
/**
   * Get the file offset of the metadata section of the file tail.
* @return the byte offset of the start of the metadata
*/
public long getMetadataOffset() {
return getMetadataOffset(fileTail);
}
/**
* Get the file offset of the stripe statistics.
* @return the byte offset of the start of the stripe statistics
*/
public long getStripeStatisticsOffset() {
return getStripeStatisticsOffset(fileTail);
}
/**
* Get the position of the end of the file.
* @return the byte length of the file
*/
public long getFileLength() {
return fileTail.getFileLength();
}
public OrcProto.FileTail getMinimalFileTail() {
OrcProto.FileTail.Builder fileTailBuilder = OrcProto.FileTail.newBuilder(fileTail);
OrcProto.Footer.Builder footerBuilder = OrcProto.Footer.newBuilder(fileTail.getFooter());
footerBuilder.clearStatistics();
fileTailBuilder.setFooter(footerBuilder.build());
return fileTailBuilder.build();
}
/**
* Get the stripe statistics from the file tail.
* This code is for compatibility with ORC 1.5.
* @return the stripe statistics
* @deprecated the user should use Reader.getStripeStatistics instead.
*/
public List<StripeStatistics> getStripeStatistics() throws IOException {
if (reader == null) {
LOG.warn("Please use Reader.getStripeStatistics or give `Reader` to OrcTail constructor.");
return new ArrayList<>();
} else {
return reader.getStripeStatistics();
}
}
}
| 7,137 | 31.593607 | 97 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/OutStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.BytesWritable;
import org.apache.orc.CompressionCodec;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.impl.writer.StreamOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.ShortBufferException;
import javax.crypto.spec.IvParameterSpec;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.Key;
import java.util.function.Consumer;
/**
* The output stream for writing to ORC files.
* It handles both compression and encryption.
*/
public class OutStream extends PositionedOutputStream {
private static final Logger LOG = LoggerFactory.getLogger(OutStream.class);
// This logger will log the local keys to be printed to the logs at debug.
// Be *extremely* careful turning it on.
static final Logger KEY_LOGGER = LoggerFactory.getLogger("org.apache.orc.keys");
public static final int HEADER_SIZE = 3;
private final Object name;
private final PhysicalWriter.OutputReceiver receiver;
/**
* Stores the uncompressed bytes that have been serialized, but not
* compressed yet. When this fills, we compress the entire buffer.
*/
private ByteBuffer current = null;
/**
* Stores the compressed bytes until we have a full buffer and then outputs
* them to the receiver. If no compression is being done, this (and overflow)
* will always be null and the current buffer will be sent directly to the
* receiver.
*/
private ByteBuffer compressed = null;
/**
* Since the compressed buffer may start with contents from previous
* compression blocks, we allocate an overflow buffer so that the
* output of the codec can be split between the two buffers. After the
* compressed buffer is sent to the receiver, the overflow buffer becomes
* the new compressed buffer.
*/
private ByteBuffer overflow = null;
private final int bufferSize;
private final CompressionCodec codec;
private final CompressionCodec.Options options;
private long compressedBytes = 0;
private long uncompressedBytes = 0;
private final Cipher cipher;
private final Key key;
private final byte[] iv;
public OutStream(Object name,
StreamOptions options,
PhysicalWriter.OutputReceiver receiver) {
this.name = name;
this.bufferSize = options.getBufferSize();
this.codec = options.getCodec();
this.options = options.getCodecOptions();
this.receiver = receiver;
if (options.isEncrypted()) {
this.cipher = options.getAlgorithm().createCipher();
this.key = options.getKey();
this.iv = options.getIv();
resetState();
} else {
this.cipher = null;
this.key = null;
this.iv = null;
}
LOG.debug("Stream {} written to with {}", name, options);
logKeyAndIv(name, key, iv);
}
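  // Illustrative usage sketch for this constructor (the receiver and buffer size are
  // hypothetical):
  //   StreamOptions options = new StreamOptions(256 * 1024);
  //   OutStream out = new OutStream("my stream", options, someOutputReceiver);
  //   out.write(data);   // buffered, then compressed/encrypted as configured
  //   out.flush();       // pushes any remaining buffers to the receiver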
static void logKeyAndIv(Object name, Key key, byte[] iv) {
if (iv != null && KEY_LOGGER.isDebugEnabled()) {
KEY_LOGGER.debug("Stream: {} Key: {} IV: {}", name,
new BytesWritable(key.getEncoded()), new BytesWritable(iv));
}
}
/**
* Change the current Initialization Vector (IV) for the encryption.
* @param modifier a function to modify the IV in place
*/
@Override
public void changeIv(Consumer<byte[]> modifier) {
if (iv != null) {
modifier.accept(iv);
resetState();
logKeyAndIv(name, key, iv);
}
}
/**
* Reset the cipher after changing the IV.
*/
private void resetState() {
try {
cipher.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));
} catch (InvalidKeyException e) {
throw new IllegalStateException("ORC bad encryption key for " + this, e);
} catch (InvalidAlgorithmParameterException e) {
throw new IllegalStateException("ORC bad encryption parameter for " + this, e);
}
}
/**
* When a buffer is done, we send it to the receiver to store.
* If we are encrypting, encrypt the buffer before we pass it on.
* @param buffer the buffer to store
*/
void outputBuffer(ByteBuffer buffer) throws IOException {
if (cipher != null) {
ByteBuffer output = buffer.duplicate();
int len = buffer.remaining();
try {
int encrypted = cipher.update(buffer, output);
output.flip();
receiver.output(output);
if (encrypted != len) {
throw new IllegalArgumentException("Encryption of incomplete buffer "
+ len + " -> " + encrypted + " in " + this);
}
} catch (ShortBufferException e) {
throw new IOException("Short buffer in encryption in " + this, e);
}
} else {
receiver.output(buffer);
}
}
/**
* Ensure that the cipher didn't save any data.
* The next call should be to changeIv to restart the encryption on a new IV.
*/
void finishEncryption() {
try {
byte[] finalBytes = cipher.doFinal();
if (finalBytes != null && finalBytes.length != 0) {
throw new IllegalStateException("We shouldn't have remaining bytes " + this);
}
} catch (IllegalBlockSizeException e) {
throw new IllegalArgumentException("Bad block size", e);
} catch (BadPaddingException e) {
throw new IllegalArgumentException("Bad padding", e);
}
}
/**
* Write the length of the compressed bytes. Life is much easier if the
* header is constant length, so just use 3 bytes. Considering most of the
* codecs want between 32k (snappy) and 256k (lzo, zlib), 3 bytes should
* be plenty. We also use the low bit for whether it is the original or
* compressed bytes.
* @param buffer the buffer to write the header to
* @param position the position in the buffer to write at
* @param val the size in the file
* @param original is it uncompressed
*/
private static void writeHeader(ByteBuffer buffer,
int position,
int val,
boolean original) {
buffer.put(position, (byte) ((val << 1) + (original ? 1 : 0)));
buffer.put(position + 1, (byte) (val >> 7));
buffer.put(position + 2, (byte) (val >> 15));
}
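  // Illustrative worked example for writeHeader above (the chunk size is made up): for a
  // compressed chunk of val = 100000 bytes with original = false, the three header bytes are
  //   byte 0 = (100000 << 1) & 0xff = 0x40   (low bit 0 => compressed)
  //   byte 1 = (100000 >> 7)  & 0xff = 0x0d
  //   byte 2 = (100000 >> 15) & 0xff = 0x03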
private void getNewInputBuffer() {
if (codec == null) {
current = ByteBuffer.allocate(bufferSize);
} else {
current = ByteBuffer.allocate(bufferSize + HEADER_SIZE);
writeHeader(current, 0, bufferSize, true);
current.position(HEADER_SIZE);
}
}
/**
* Throws exception if the bufferSize argument equals or exceeds 2^(3*8 - 1).
* See {@link OutStream#writeHeader(ByteBuffer, int, int, boolean)}.
   * The bufferSize needs to be expressible in 3 bytes, and the least significant bit
   * of the header is used to indicate original vs. compressed bytes.
* @param bufferSize The ORC compression buffer size being checked.
* @throws IllegalArgumentException If bufferSize value exceeds threshold.
*/
public static void assertBufferSizeValid(int bufferSize) throws IllegalArgumentException {
if (bufferSize >= (1 << 23)) {
throw new IllegalArgumentException("Illegal value of ORC compression buffer size: " + bufferSize);
}
}
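  // Illustrative example: assertBufferSizeValid(4 * 1024 * 1024) passes, while
  // assertBufferSizeValid(1 << 23) throws, since 8 MB does not fit in the 23 bits left
  // after the original/compressed flag.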
/**
* Allocate a new output buffer if we are compressing.
*/
private ByteBuffer getNewOutputBuffer() {
return ByteBuffer.allocate(bufferSize + HEADER_SIZE);
}
private void flip() {
current.limit(current.position());
current.position(codec == null ? 0 : HEADER_SIZE);
}
@Override
public void write(int i) throws IOException {
if (current == null) {
getNewInputBuffer();
}
if (current.remaining() < 1) {
spill();
}
uncompressedBytes += 1;
current.put((byte) i);
}
@Override
public void write(byte[] bytes, int offset, int length) throws IOException {
if (current == null) {
getNewInputBuffer();
}
int remaining = Math.min(current.remaining(), length);
current.put(bytes, offset, remaining);
uncompressedBytes += remaining;
length -= remaining;
while (length != 0) {
spill();
offset += remaining;
remaining = Math.min(current.remaining(), length);
current.put(bytes, offset, remaining);
uncompressedBytes += remaining;
length -= remaining;
}
}
private void spill() throws java.io.IOException {
// if there isn't anything in the current buffer, don't spill
if (current == null ||
current.position() == (codec == null ? 0 : HEADER_SIZE)) {
return;
}
flip();
if (codec == null) {
outputBuffer(current);
getNewInputBuffer();
} else {
if (compressed == null) {
compressed = getNewOutputBuffer();
} else if (overflow == null) {
overflow = getNewOutputBuffer();
}
int sizePosn = compressed.position();
compressed.position(compressed.position() + HEADER_SIZE);
if (codec.compress(current, compressed, overflow, options)) {
uncompressedBytes = 0;
// move position back to after the header
current.position(HEADER_SIZE);
current.limit(current.capacity());
// find the total bytes in the chunk
int totalBytes = compressed.position() - sizePosn - HEADER_SIZE;
if (overflow != null) {
totalBytes += overflow.position();
}
compressedBytes += totalBytes + HEADER_SIZE;
writeHeader(compressed, sizePosn, totalBytes, false);
// if we have less than the next header left, spill it.
if (compressed.remaining() < HEADER_SIZE) {
compressed.flip();
outputBuffer(compressed);
compressed = overflow;
overflow = null;
}
} else {
compressedBytes += uncompressedBytes + HEADER_SIZE;
uncompressedBytes = 0;
// we are using the original, but need to spill the current
// compressed buffer first. So back up to where we started,
// flip it and add it to done.
if (sizePosn != 0) {
compressed.position(sizePosn);
compressed.flip();
outputBuffer(compressed);
compressed = null;
// if we have an overflow, clear it and make it the new compress
// buffer
if (overflow != null) {
overflow.clear();
compressed = overflow;
overflow = null;
}
} else {
compressed.clear();
if (overflow != null) {
overflow.clear();
}
}
// now add the current buffer into the done list and get a new one.
current.position(0);
// update the header with the current length
writeHeader(current, 0, current.limit() - HEADER_SIZE, true);
outputBuffer(current);
getNewInputBuffer();
}
}
}
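  // Rough illustration of the two paths in spill() above: when codec.compress reports
  // success, the compressed chunk (possibly spilling into the overflow buffer) is emitted
  // behind a "compressed" header; otherwise the original bytes are emitted unchanged behind
  // an "original" header, so a reader can handle either form chunk by chunk.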
@Override
public void getPosition(PositionRecorder recorder) {
if (codec == null) {
recorder.addPosition(uncompressedBytes);
} else {
recorder.addPosition(compressedBytes);
recorder.addPosition(uncompressedBytes);
}
}
@Override
public void flush() throws IOException {
spill();
if (compressed != null && compressed.position() != 0) {
compressed.flip();
outputBuffer(compressed);
}
if (cipher != null) {
finishEncryption();
}
compressed = null;
uncompressedBytes = 0;
compressedBytes = 0;
overflow = null;
current = null;
}
@Override
public String toString() {
return name.toString();
}
@Override
public long getBufferSize() {
if (codec == null) {
return uncompressedBytes + (current == null ? 0 : current.remaining());
} else {
long result = 0;
if (current != null) {
result += current.capacity();
}
if (compressed != null) {
result += compressed.capacity();
}
if (overflow != null) {
result += overflow.capacity();
}
return result + compressedBytes;
}
}
/**
* Set suppress flag
*/
public void suppress() {
receiver.suppress();
}
}
| 13,068 | 31.6725 | 104 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/ParserUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
public class ParserUtils {
static TypeDescription.Category parseCategory(ParserUtils.StringPosition source) {
StringBuilder word = new StringBuilder();
boolean hadSpace = true;
while (source.position < source.length) {
char ch = source.value.charAt(source.position);
if (Character.isLetter(ch)) {
word.append(Character.toLowerCase(ch));
hadSpace = false;
} else if (ch == ' ') {
if (!hadSpace) {
hadSpace = true;
word.append(ch);
}
} else {
break;
}
source.position += 1;
}
String catString = word.toString();
// if there were trailing spaces, remove them.
if (hadSpace) {
catString = catString.trim();
}
if (!catString.isEmpty()) {
for (TypeDescription.Category cat : TypeDescription.Category.values()) {
if (cat.getName().equals(catString)) {
return cat;
}
}
}
throw new IllegalArgumentException("Can't parse category at " + source);
}
static int parseInt(ParserUtils.StringPosition source) {
int start = source.position;
int result = 0;
while (source.position < source.length) {
char ch = source.value.charAt(source.position);
if (!Character.isDigit(ch)) {
break;
}
result = result * 10 + (ch - '0');
source.position += 1;
}
if (source.position == start) {
throw new IllegalArgumentException("Missing integer at " + source);
}
return result;
}
public static String parseName(ParserUtils.StringPosition source) {
if (source.position == source.length) {
throw new IllegalArgumentException("Missing name at " + source);
}
final int start = source.position;
if (source.value.charAt(source.position) == '`') {
source.position += 1;
StringBuilder buffer = new StringBuilder();
boolean closed = false;
while (source.position < source.length) {
char ch = source.value.charAt(source.position);
source.position += 1;
if (ch == '`') {
if (source.position < source.length &&
source.value.charAt(source.position) == '`') {
source.position += 1;
buffer.append('`');
} else {
closed = true;
break;
}
} else {
buffer.append(ch);
}
}
if (!closed) {
source.position = start;
throw new IllegalArgumentException("Unmatched quote at " + source);
} else if (buffer.length() == 0) {
throw new IllegalArgumentException("Empty quoted field name at " + source);
}
return buffer.toString();
} else {
while (source.position < source.length) {
char ch = source.value.charAt(source.position);
if (!Character.isLetterOrDigit(ch) && ch != '_') {
break;
}
source.position += 1;
}
if (source.position == start) {
throw new IllegalArgumentException("Missing name at " + source);
}
return source.value.substring(start, source.position);
}
}
static void requireChar(ParserUtils.StringPosition source, char required) {
if (source.position >= source.length ||
source.value.charAt(source.position) != required) {
throw new IllegalArgumentException("Missing required char '" +
required + "' at " + source);
}
source.position += 1;
}
private static boolean consumeChar(ParserUtils.StringPosition source,
char ch) {
boolean result = source.position < source.length &&
source.value.charAt(source.position) == ch;
if (result) {
source.position += 1;
}
return result;
}
private static void parseUnion(TypeDescription type,
ParserUtils.StringPosition source) {
requireChar(source, '<');
do {
type.addUnionChild(parseType(source));
} while (consumeChar(source, ','));
requireChar(source, '>');
}
private static void parseStruct(TypeDescription type,
ParserUtils.StringPosition source) {
requireChar(source, '<');
boolean needComma = false;
while (!consumeChar(source, '>')) {
if (needComma) {
requireChar(source, ',');
} else {
needComma = true;
}
String fieldName = parseName(source);
requireChar(source, ':');
type.addField(fieldName, parseType(source));
}
}
public static TypeDescription parseType(ParserUtils.StringPosition source) {
TypeDescription result = new TypeDescription(parseCategory(source));
switch (result.getCategory()) {
case BINARY:
case BOOLEAN:
case BYTE:
case DATE:
case DOUBLE:
case FLOAT:
case INT:
case LONG:
case SHORT:
case STRING:
case TIMESTAMP:
case TIMESTAMP_INSTANT:
break;
case CHAR:
case VARCHAR:
requireChar(source, '(');
result.withMaxLength(parseInt(source));
requireChar(source, ')');
break;
case DECIMAL: {
requireChar(source, '(');
int precision = parseInt(source);
requireChar(source, ',');
result.withScale(parseInt(source));
result.withPrecision(precision);
requireChar(source, ')');
break;
}
case LIST: {
requireChar(source, '<');
TypeDescription child = parseType(source);
result.addChild(child);
requireChar(source, '>');
break;
}
case MAP: {
requireChar(source, '<');
TypeDescription keyType = parseType(source);
result.addChild(keyType);
requireChar(source, ',');
TypeDescription valueType = parseType(source);
result.addChild(valueType);
requireChar(source, '>');
break;
}
case UNION:
parseUnion(result, source);
break;
case STRUCT:
parseStruct(result, source);
break;
default:
throw new IllegalArgumentException("Unknown type " +
result.getCategory() + " at " + source);
}
return result;
}
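  // Illustrative example for parseType above, with a hypothetical input: parsing
  //   "struct<first:int,second:decimal(10,2),third:map<string,double>>"
  // recurses through the switch and yields a STRUCT with three children, where the
  // decimal child carries precision 10 and scale 2.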
/**
* Split a compound name into parts separated by '.'.
* @param source the string to parse into simple names
* @return a list of simple names from the source
*/
private static List<String> splitName(ParserUtils.StringPosition source) {
List<String> result = new ArrayList<>();
do {
result.add(parseName(source));
} while (consumeChar(source, '.'));
return result;
}
private static final Pattern INTEGER_PATTERN = Pattern.compile("^[0-9]+$");
public static TypeDescription findSubtype(TypeDescription schema,
ParserUtils.StringPosition source) {
return findSubtype(schema, source, true);
}
public interface TypeVisitor {
/**
* As we navigate to the column, call this on each level
* @param type new level we are moving to
* @param posn the position relative to the parent
*/
void visit(TypeDescription type, int posn);
}
public static class TypeFinder implements TypeVisitor {
public TypeDescription current;
public TypeFinder(TypeDescription schema) {
current = schema;
}
@Override
public void visit(TypeDescription type, int posn) {
current = type;
}
}
public static TypeDescription findSubtype(TypeDescription schema,
ParserUtils.StringPosition source,
boolean isSchemaEvolutionCaseAware) {
TypeFinder result = new TypeFinder(removeAcid(schema));
findColumn(result.current, source, isSchemaEvolutionCaseAware, result);
return result.current;
}
private static TypeDescription removeAcid(TypeDescription schema) {
return SchemaEvolution.checkAcidSchema(schema)
? SchemaEvolution.getBaseRow(schema) : schema;
}
private static int findCaseInsensitive(List<String> list, String goal) {
for (int i = 0; i < list.size(); i++) {
if (list.get(i).equalsIgnoreCase(goal)) {
return i;
}
}
return -1;
}
public static void findSubtype(TypeDescription schema,
int goal,
TypeVisitor visitor) {
TypeDescription current = schema;
int id = schema.getId();
if (goal < id || goal > schema.getMaximumId()) {
throw new IllegalArgumentException("Unknown type id " + goal + " in " +
current.toJson());
}
while (id != goal) {
List<TypeDescription> children = current.getChildren();
for(int i=0; i < children.size(); ++i) {
TypeDescription child = children.get(i);
if (goal <= child.getMaximumId()) {
current = child;
visitor.visit(current, i);
break;
}
}
id = current.getId();
}
}
/**
* Find a column in a schema by walking down the type tree to find the right column.
* @param schema the schema to look in
* @param source the name of the column
* @param isSchemaEvolutionCaseAware should the string compare be case sensitive
* @param visitor The visitor, which is called on each level
*/
public static void findColumn(TypeDescription schema,
ParserUtils.StringPosition source,
boolean isSchemaEvolutionCaseAware,
TypeVisitor visitor) {
findColumn(schema, ParserUtils.splitName(source), isSchemaEvolutionCaseAware, visitor);
}
/**
* Find a column in a schema by walking down the type tree to find the right column.
* @param schema the schema to look in
* @param names the name of the column broken into a list of names per level
* @param isSchemaEvolutionCaseAware should the string compare be case sensitive
* @param visitor The visitor, which is called on each level
*/
public static void findColumn(TypeDescription schema,
List<String> names,
boolean isSchemaEvolutionCaseAware,
TypeVisitor visitor) {
if (names.size() == 1 && INTEGER_PATTERN.matcher(names.get(0)).matches()) {
findSubtype(schema, Integer.parseInt(names.get(0)), visitor);
return;
}
TypeDescription current = schema;
int posn;
while (names.size() > 0) {
String first = names.remove(0);
switch (current.getCategory()) {
case STRUCT: {
posn = isSchemaEvolutionCaseAware
? current.getFieldNames().indexOf(first)
: findCaseInsensitive(current.getFieldNames(), first);
break;
}
case LIST:
if (first.equals("_elem")) {
posn = 0;
} else {
posn = -1;
}
break;
case MAP:
if (first.equals("_key")) {
posn = 0;
} else if (first.equals("_value")) {
posn = 1;
} else {
posn = -1;
}
break;
case UNION: {
try {
posn = Integer.parseInt(first);
if (posn < 0 || posn >= current.getChildren().size()) {
throw new NumberFormatException("off end of union");
}
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Field " + first +
"not found in " + current, e);
}
break;
}
default:
posn = -1;
}
if (posn < 0) {
throw new IllegalArgumentException("Field " + first +
" not found in " + current);
}
current = current.getChildren().get(posn);
visitor.visit(current, posn);
}
}
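  // Illustrative example for findColumn above, against a hypothetical schema
  //   struct<name:string,tags:map<string,int>,points:array<struct<x:int,y:int>>>:
  // "tags._value" visits the map's value child, "points._elem.x" visits the x field of the
  // list's element struct, and a purely numeric name such as "5" is treated as a column id
  // and resolved through findSubtype instead.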
static class ColumnFinder implements TypeVisitor {
// top and current are interpreted as a union, only one of them is expected to be set at any
// given time.
private ColumnVector[] top;
private ColumnVector current = null;
private final ColumnVector[] result;
private int resultIdx = 0;
ColumnFinder(TypeDescription schema, ColumnVector[] columnVectors, int levels) {
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
top = columnVectors;
result = new ColumnVector[levels];
} else {
result = new ColumnVector[levels + 1];
current = columnVectors[0];
top = null;
addResult(current);
}
}
ColumnFinder(TypeDescription schema, VectorizedRowBatch vectorizedRowBatch, int levels) {
this(schema, vectorizedRowBatch.cols, levels);
}
private void addResult(ColumnVector vector) {
result[resultIdx] = vector;
resultIdx += 1;
}
@Override
public void visit(TypeDescription type, int posn) {
if (current == null) {
current = top[posn];
top = null;
} else {
current = navigate(current, posn);
}
addResult(current);
}
private ColumnVector navigate(ColumnVector parent, int posn) {
if (parent instanceof ListColumnVector) {
return ((ListColumnVector) parent).child;
} else if (parent instanceof StructColumnVector) {
return ((StructColumnVector) parent).fields[posn];
} else if (parent instanceof UnionColumnVector) {
return ((UnionColumnVector) parent).fields[posn];
} else if (parent instanceof MapColumnVector) {
MapColumnVector m = (MapColumnVector) parent;
return posn == 0 ? m.keys : m.values;
}
throw new IllegalArgumentException("Unknown complex column vector " + parent.getClass());
}
}
public static ColumnVector[] findColumnVectors(TypeDescription schema,
StringPosition source,
boolean isCaseSensitive,
VectorizedRowBatch batch) {
List<String> names = ParserUtils.splitName(source);
TypeDescription schemaToUse = removeAcid(schema);
ColumnVector[] columnVectors = SchemaEvolution.checkAcidSchema(schema)
? ((StructColumnVector) batch.cols[batch.cols.length - 1]).fields : batch.cols;
ColumnFinder result = new ColumnFinder(schemaToUse, columnVectors, names.size());
findColumn(schemaToUse, names, isCaseSensitive, result);
return result.result;
}
public static List<TypeDescription> findSubtypeList(TypeDescription schema,
StringPosition source) {
List<TypeDescription> result = new ArrayList<>();
if (source.hasCharactersLeft()) {
do {
result.add(findSubtype(schema, source));
} while (consumeChar(source, ','));
}
return result;
}
public static class StringPosition {
final String value;
int position;
final int length;
public StringPosition(String value) {
this.value = value == null ? "" : value;
position = 0;
length = this.value.length();
}
@Override
public String toString() {
return '\'' + value.substring(0, position) + '^' +
value.substring(position) + '\'';
}
public String fromPosition(int start) {
return value.substring(start, this.position);
}
public boolean hasCharactersLeft() {
return position != length;
}
}
/**
* Annotate the given schema with the encryption information.
*
* Format of the string is a key-list.
* <ul>
* <li>key-list = key (';' key-list)?</li>
* <li>key = key-name ':' field-list</li>
* <li>field-list = field-name ( ',' field-list )?</li>
* <li>field-name = number | field-part ('.' field-name)?</li>
* <li>field-part = quoted string | simple name</li>
* </ul>
*
* @param source the string to parse
* @param schema the top level schema
* @throws IllegalArgumentException if there are conflicting keys for a field
*/
public static void parseKeys(StringPosition source, TypeDescription schema) {
if (source.hasCharactersLeft()) {
do {
String keyName = parseName(source);
requireChar(source, ':');
for (TypeDescription field : findSubtypeList(schema, source)) {
String prev = field.getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE);
if (prev != null && !prev.equals(keyName)) {
throw new IllegalArgumentException("Conflicting encryption keys " +
keyName + " and " + prev);
}
field.setAttribute(TypeDescription.ENCRYPT_ATTRIBUTE, keyName);
}
} while (consumeChar(source, ';'));
}
}
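  // Illustrative example for parseKeys above (the key names and fields are hypothetical):
  // a source string such as
  //   "pii:name,address.zip;finance:`card number`"
  // tags name and address.zip with the "pii" key and the quoted field `card number` with
  // the "finance" key by setting each field's encrypt attribute.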
/**
* Annotate the given schema with the masking information.
*
* Format of the string is a mask-list.
* <ul>
* <li>mask-list = mask (';' mask-list)?</li>
* <li>mask = mask-name (',' parameter)* ':' field-list</li>
* <li>field-list = field-name ( ',' field-list )?</li>
* <li>field-name = number | field-part ('.' field-name)?</li>
* <li>field-part = quoted string | simple name</li>
* </ul>
*
* @param source the string to parse
* @param schema the top level schema
* @throws IllegalArgumentException if there are conflicting masks for a field
*/
public static void parseMasks(StringPosition source, TypeDescription schema) {
if (source.hasCharactersLeft()) {
do {
// parse the mask and parameters, but only get the underlying string
int start = source.position;
parseName(source);
while (consumeChar(source, ',')) {
parseName(source);
}
String maskString = source.fromPosition(start);
requireChar(source, ':');
for (TypeDescription field : findSubtypeList(schema, source)) {
String prev = field.getAttributeValue(TypeDescription.MASK_ATTRIBUTE);
if (prev != null && !prev.equals(maskString)) {
throw new IllegalArgumentException("Conflicting encryption masks " +
maskString + " and " + prev);
}
field.setAttribute(TypeDescription.MASK_ATTRIBUTE, maskString);
}
} while (consumeChar(source, ';'));
}
}
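  // Illustrative example for parseMasks above (the mask names and fields are hypothetical):
  // a source string such as
  //   "nullify:ssn;redact,x,x:comments"
  // stores the mask string "nullify" on ssn and "redact,x,x" (mask name plus its parameters)
  // on comments via each field's mask attribute.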
public static MaskDescriptionImpl buildMaskDescription(String value) {
StringPosition source = new StringPosition(value);
String maskName = parseName(source);
List<String> params = new ArrayList<>();
while (consumeChar(source, ',')) {
params.add(parseName(source));
}
return new MaskDescriptionImpl(maskName, params.toArray(new String[0]));
}
}
| 20,045 | 32.804384 | 97 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/PhysicalFsWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedOutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.CompressionCodec;
import org.apache.orc.EncryptionVariant;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.writer.StreamOptions;
import org.apache.orc.impl.writer.WriterEncryptionKey;
import org.apache.orc.impl.writer.WriterEncryptionVariant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
public class PhysicalFsWriter implements PhysicalWriter {
private static final Logger LOG = LoggerFactory.getLogger(PhysicalFsWriter.class);
private static final int HDFS_BUFFER_SIZE = 256 * 1024;
private FSDataOutputStream rawWriter;
private final DirectStream rawStream;
// the compressed metadata information outStream
private OutStream compressStream;
  // a protobuf CodedOutputStream wrapped around compressStream
private CodedOutputStream codedCompressStream;
private Path path;
private final HadoopShims shims;
private final long blockSize;
private final int maxPadding;
private final StreamOptions compress;
private final OrcFile.CompressionStrategy compressionStrategy;
private final boolean addBlockPadding;
private final boolean writeVariableLengthBlocks;
private final VariantTracker unencrypted;
private long headerLength;
private long stripeStart;
  // The position of the last time we wrote a short block, which becomes the
  // origin for computing the natural block boundaries that follow.
private long blockOffset;
private int metadataLength;
private int stripeStatisticsLength = 0;
private int footerLength;
private int stripeNumber = 0;
private final Map<WriterEncryptionVariant, VariantTracker> variants = new TreeMap<>();
public PhysicalFsWriter(FileSystem fs,
Path path,
OrcFile.WriterOptions opts
) throws IOException {
this(fs, path, opts, new WriterEncryptionVariant[0]);
}
public PhysicalFsWriter(FileSystem fs,
Path path,
OrcFile.WriterOptions opts,
WriterEncryptionVariant[] encryption
) throws IOException {
this(fs.create(path, opts.getOverwrite(), HDFS_BUFFER_SIZE,
fs.getDefaultReplication(path), opts.getBlockSize()), opts, encryption);
this.path = path;
LOG.info("ORC writer created for path: {} with stripeSize: {} blockSize: {}" +
" compression: {}", path, opts.getStripeSize(), blockSize, compress);
}
public PhysicalFsWriter(FSDataOutputStream outputStream,
OrcFile.WriterOptions opts,
WriterEncryptionVariant[] encryption
) throws IOException {
this.rawWriter = outputStream;
long defaultStripeSize = opts.getStripeSize();
this.addBlockPadding = opts.getBlockPadding();
if (opts.isEnforceBufferSize()) {
this.compress = new StreamOptions(opts.getBufferSize());
} else {
this.compress = new StreamOptions(
WriterImpl.getEstimatedBufferSize(defaultStripeSize,
opts.getSchema().getMaximumId() + 1,
opts.getBufferSize()));
}
CompressionCodec codec = OrcCodecPool.getCodec(opts.getCompress());
if (codec != null){
compress.withCodec(codec, codec.getDefaultOptions());
}
this.compressionStrategy = opts.getCompressionStrategy();
this.maxPadding = (int) (opts.getPaddingTolerance() * defaultStripeSize);
this.blockSize = opts.getBlockSize();
blockOffset = 0;
unencrypted = new VariantTracker(opts.getSchema(), compress);
writeVariableLengthBlocks = opts.getWriteVariableLengthBlocks();
shims = opts.getHadoopShims();
rawStream = new DirectStream(rawWriter);
compressStream = new OutStream("stripe footer", compress, rawStream);
codedCompressStream = CodedOutputStream.newInstance(compressStream);
for(WriterEncryptionVariant variant: encryption) {
WriterEncryptionKey key = variant.getKeyDescription();
StreamOptions encryptOptions =
new StreamOptions(unencrypted.options)
.withEncryption(key.getAlgorithm(), variant.getFileFooterKey());
variants.put(variant, new VariantTracker(variant.getRoot(), encryptOptions));
}
}
/**
* Record the information about each column encryption variant.
* The unencrypted data and each encrypted column root are variants.
*/
protected static class VariantTracker {
// the streams that make up the current stripe
protected final Map<StreamName, BufferedStream> streams = new TreeMap<>();
private final int rootColumn;
private final int lastColumn;
protected final StreamOptions options;
// a list for each column covered by this variant
// the elements in the list correspond to each stripe in the file
protected final List<OrcProto.ColumnStatistics>[] stripeStats;
protected final List<OrcProto.Stream> stripeStatsStreams = new ArrayList<>();
protected final OrcProto.ColumnStatistics[] fileStats;
VariantTracker(TypeDescription schema, StreamOptions options) {
rootColumn = schema.getId();
lastColumn = schema.getMaximumId();
this.options = options;
stripeStats = new List[schema.getMaximumId() - schema.getId() + 1];
for(int i=0; i < stripeStats.length; ++i) {
stripeStats[i] = new ArrayList<>();
}
fileStats = new OrcProto.ColumnStatistics[stripeStats.length];
}
public BufferedStream createStream(StreamName name) {
BufferedStream result = new BufferedStream();
streams.put(name, result);
return result;
}
/**
* Place the streams in the appropriate area while updating the sizes
* with the number of bytes in the area.
* @param area the area to write
* @param sizes the sizes of the areas
* @return the list of stream descriptions to add
*/
public List<OrcProto.Stream> placeStreams(StreamName.Area area,
SizeCounters sizes) {
List<OrcProto.Stream> result = new ArrayList<>(streams.size());
for(Map.Entry<StreamName, BufferedStream> stream: streams.entrySet()) {
StreamName name = stream.getKey();
BufferedStream bytes = stream.getValue();
if (name.getArea() == area && !bytes.isSuppressed) {
OrcProto.Stream.Builder builder = OrcProto.Stream.newBuilder();
long size = bytes.getOutputSize();
if (area == StreamName.Area.INDEX) {
sizes.index += size;
} else {
sizes.data += size;
}
builder.setColumn(name.getColumn())
.setKind(name.getKind())
.setLength(size);
result.add(builder.build());
}
}
return result;
}
/**
* Write the streams in the appropriate area.
* @param area the area to write
* @param raw the raw stream to write to
*/
public void writeStreams(StreamName.Area area,
FSDataOutputStream raw) throws IOException {
for(Map.Entry<StreamName, BufferedStream> stream: streams.entrySet()) {
if (stream.getKey().getArea() == area) {
stream.getValue().spillToDiskAndClear(raw);
}
}
}
/**
     * Compute the size of the given column on disk for this stripe.
* It excludes the index streams.
* @param column a column id
* @return the total number of bytes
*/
public long getFileBytes(int column) {
long result = 0;
if (column >= rootColumn && column <= lastColumn) {
for(Map.Entry<StreamName, BufferedStream> entry: streams.entrySet()) {
StreamName name = entry.getKey();
if (name.getColumn() == column &&
name.getArea() != StreamName.Area.INDEX) {
result += entry.getValue().getOutputSize();
}
}
}
return result;
}
}
VariantTracker getVariant(EncryptionVariant column) {
if (column == null) {
return unencrypted;
}
return variants.get(column);
}
/**
   * Get the number of bytes in the file for a given column
   * by finding all the streams (not suppressed)
   * for that column and returning the sum of their sizes.
   * Index streams are excluded.
*
* @param column column from which to get file size
* @return number of bytes for the given column
*/
@Override
public long getFileBytes(int column, WriterEncryptionVariant variant) {
return getVariant(variant).getFileBytes(column);
}
@Override
public StreamOptions getStreamOptions() {
return unencrypted.options;
}
private static final byte[] ZEROS = new byte[64*1024];
private static void writeZeros(OutputStream output,
long remaining) throws IOException {
while (remaining > 0) {
long size = Math.min(ZEROS.length, remaining);
output.write(ZEROS, 0, (int) size);
remaining -= size;
}
}
/**
   * Do any required shortening of the HDFS block or padding to avoid straddling
* HDFS blocks. This is called before writing the current stripe.
* @param stripeSize the number of bytes in the current stripe
*/
private void padStripe(long stripeSize) throws IOException {
this.stripeStart = rawWriter.getPos();
long previousBytesInBlock = (stripeStart - blockOffset) % blockSize;
// We only have options if this isn't the first stripe in the block
if (previousBytesInBlock > 0) {
if (previousBytesInBlock + stripeSize >= blockSize) {
// Try making a short block
if (writeVariableLengthBlocks &&
shims.endVariableLengthBlock(rawWriter)) {
blockOffset = stripeStart;
} else if (addBlockPadding) {
// if we cross the block boundary, figure out what we should do
long padding = blockSize - previousBytesInBlock;
if (padding <= maxPadding) {
writeZeros(rawWriter, padding);
stripeStart += padding;
}
}
}
}
}
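  // Illustrative worked example for padStripe above (all sizes are made up): with a 256 MB
  // block, a stripe that would start 254 MB into the block, and roughly 10 MB of stripe to
  // write, the stripe would cross the block boundary; if a variable-length block cannot be
  // ended here and the 2 MB needed to reach the boundary is within maxPadding, 2 MB of
  // zeros are written so the stripe begins at the start of the next block.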
/**
* An output receiver that writes the ByteBuffers to the output stream
* as they are received.
*/
private static class DirectStream implements OutputReceiver {
private final FSDataOutputStream output;
DirectStream(FSDataOutputStream output) {
this.output = output;
}
@Override
public void output(ByteBuffer buffer) throws IOException {
output.write(buffer.array(), buffer.arrayOffset() + buffer.position(),
buffer.remaining());
}
@Override
public void suppress() {
throw new UnsupportedOperationException("Can't suppress direct stream");
}
}
private void writeStripeFooter(OrcProto.StripeFooter footer,
SizeCounters sizes,
OrcProto.StripeInformation.Builder dirEntry) throws IOException {
footer.writeTo(codedCompressStream);
codedCompressStream.flush();
compressStream.flush();
dirEntry.setOffset(stripeStart);
dirEntry.setFooterLength(rawWriter.getPos() - stripeStart - sizes.total());
}
/**
   * Write the saved encrypted stripe statistics for a variant out to the file.
* The streams that are written are added to the tracker.stripeStatsStreams.
* @param output the file we are writing to
* @param stripeNumber the number of stripes in the file
* @param tracker the variant to write out
*/
static void writeEncryptedStripeStatistics(DirectStream output,
int stripeNumber,
VariantTracker tracker
) throws IOException {
StreamOptions options = new StreamOptions(tracker.options);
tracker.stripeStatsStreams.clear();
for(int col = tracker.rootColumn;
col < tracker.rootColumn + tracker.stripeStats.length; ++col) {
options.modifyIv(CryptoUtils.modifyIvForStream(col,
OrcProto.Stream.Kind.STRIPE_STATISTICS, stripeNumber + 1));
OutStream stream = new OutStream("stripe stats for " + col,
options, output);
OrcProto.ColumnarStripeStatistics stats =
OrcProto.ColumnarStripeStatistics.newBuilder()
.addAllColStats(tracker.stripeStats[col - tracker.rootColumn])
.build();
long start = output.output.getPos();
stats.writeTo(stream);
stream.flush();
OrcProto.Stream description = OrcProto.Stream.newBuilder()
.setColumn(col)
.setKind(OrcProto.Stream.Kind.STRIPE_STATISTICS)
.setLength(output.output.getPos() - start)
.build();
tracker.stripeStatsStreams.add(description);
}
}
/**
* Merge the saved unencrypted stripe statistics into the Metadata section
* of the footer.
* @param builder the Metadata section of the file
* @param stripeCount the number of stripes in the file
* @param stats the stripe statistics
*/
static void setUnencryptedStripeStatistics(OrcProto.Metadata.Builder builder,
int stripeCount,
List<OrcProto.ColumnStatistics>[] stats) {
// Make the unencrypted stripe stats into lists of StripeStatistics.
builder.clearStripeStats();
for(int s=0; s < stripeCount; ++s) {
OrcProto.StripeStatistics.Builder stripeStats =
OrcProto.StripeStatistics.newBuilder();
for(List<OrcProto.ColumnStatistics> col: stats) {
stripeStats.addColStats(col.get(s));
}
builder.addStripeStats(stripeStats.build());
}
}
static void setEncryptionStatistics(OrcProto.Encryption.Builder encryption,
int stripeNumber,
Collection<VariantTracker> variants
) throws IOException {
int v = 0;
for(VariantTracker variant: variants) {
OrcProto.EncryptionVariant.Builder variantBuilder =
encryption.getVariantsBuilder(v++);
// Add the stripe statistics streams to the variant description.
variantBuilder.clearStripeStatistics();
variantBuilder.addAllStripeStatistics(variant.stripeStatsStreams);
// Serialize and encrypt the file statistics.
OrcProto.FileStatistics.Builder file = OrcProto.FileStatistics.newBuilder();
for(OrcProto.ColumnStatistics col: variant.fileStats) {
file.addColumn(col);
}
StreamOptions options = new StreamOptions(variant.options);
options.modifyIv(CryptoUtils.modifyIvForStream(variant.rootColumn,
OrcProto.Stream.Kind.FILE_STATISTICS, stripeNumber + 1));
BufferedStream buffer = new BufferedStream();
OutStream stream = new OutStream("stats for " + variant, options, buffer);
file.build().writeTo(stream);
stream.flush();
variantBuilder.setFileStatistics(buffer.getBytes());
}
}
@Override
public void writeFileMetadata(OrcProto.Metadata.Builder builder) throws IOException {
long stripeStatisticsStart = rawWriter.getPos();
for(VariantTracker variant: variants.values()) {
writeEncryptedStripeStatistics(rawStream, stripeNumber, variant);
}
setUnencryptedStripeStatistics(builder, stripeNumber,
unencrypted.stripeStats);
long metadataStart = rawWriter.getPos();
builder.build().writeTo(codedCompressStream);
codedCompressStream.flush();
compressStream.flush();
this.stripeStatisticsLength = (int) (metadataStart - stripeStatisticsStart);
this.metadataLength = (int) (rawWriter.getPos() - metadataStart);
}
static void addUnencryptedStatistics(OrcProto.Footer.Builder builder,
OrcProto.ColumnStatistics[] stats) {
for(OrcProto.ColumnStatistics stat: stats) {
builder.addStatistics(stat);
}
}
@Override
public void writeFileFooter(OrcProto.Footer.Builder builder) throws IOException {
if (variants.size() > 0) {
OrcProto.Encryption.Builder encryption = builder.getEncryptionBuilder();
setEncryptionStatistics(encryption, stripeNumber, variants.values());
}
addUnencryptedStatistics(builder, unencrypted.fileStats);
long bodyLength = rawWriter.getPos() - metadataLength - stripeStatisticsLength;
builder.setContentLength(bodyLength);
builder.setHeaderLength(headerLength);
long startPosn = rawWriter.getPos();
OrcProto.Footer footer = builder.build();
footer.writeTo(codedCompressStream);
codedCompressStream.flush();
compressStream.flush();
this.footerLength = (int) (rawWriter.getPos() - startPosn);
}
@Override
public long writePostScript(OrcProto.PostScript.Builder builder) throws IOException {
builder.setFooterLength(footerLength);
builder.setMetadataLength(metadataLength);
if (variants.size() > 0) {
builder.setStripeStatisticsLength(stripeStatisticsLength);
}
OrcProto.PostScript ps = builder.build();
// need to write this uncompressed
long startPosn = rawWriter.getPos();
ps.writeTo(rawWriter);
long length = rawWriter.getPos() - startPosn;
if (length > 255) {
throw new IllegalArgumentException("PostScript too large at " + length);
}
rawWriter.writeByte((int)length);
return rawWriter.getPos();
}
@Override
public void close() throws IOException {
    // We don't use the codec directly, but we do hand it out in getCompressionCodec;
// that is used in tests, for boolean checks, and in StreamFactory. Some of the changes that
// would get rid of this pattern require cross-project interface changes, so just return the
// codec for now.
CompressionCodec codec = compress.getCodec();
if (codec != null) {
OrcCodecPool.returnCodec(codec.getKind(), codec);
}
compress.withCodec(null, null);
rawWriter.close();
rawWriter = null;
}
@Override
public void flush() throws IOException {
rawWriter.hflush();
}
@Override
public void appendRawStripe(ByteBuffer buffer,
OrcProto.StripeInformation.Builder dirEntry) throws IOException {
long start = rawWriter.getPos();
int length = buffer.remaining();
long availBlockSpace = blockSize - (start % blockSize);
// see if stripe can fit in the current hdfs block, else pad the remaining
// space in the block
if (length < blockSize && length > availBlockSpace &&
addBlockPadding) {
byte[] pad = new byte[(int) Math.min(HDFS_BUFFER_SIZE, availBlockSpace)];
LOG.info("Padding ORC by {} bytes while merging", availBlockSpace);
start += availBlockSpace;
while (availBlockSpace > 0) {
int writeLen = (int) Math.min(availBlockSpace, pad.length);
rawWriter.write(pad, 0, writeLen);
availBlockSpace -= writeLen;
}
}
rawWriter.write(buffer.array(), buffer.arrayOffset() + buffer.position(),
length);
dirEntry.setOffset(start);
stripeNumber += 1;
}
/**
* This class is used to hold the contents of streams as they are buffered.
* The TreeWriters write to the outStream and the codec compresses the
* data as buffers fill up and stores them in the output list. When the
* stripe is being written, the whole stream is written to the file.
*/
static final class BufferedStream implements OutputReceiver {
private boolean isSuppressed = false;
private final List<ByteBuffer> output = new ArrayList<>();
@Override
public void output(ByteBuffer buffer) {
if (!isSuppressed) {
output.add(buffer);
}
}
@Override
public void suppress() {
isSuppressed = true;
output.clear();
}
/**
     * Write any saved buffers to the OutputStream if needed, and clear all the
* buffers.
* @return true if the stream was written
*/
boolean spillToDiskAndClear(FSDataOutputStream raw) throws IOException {
if (!isSuppressed) {
for (ByteBuffer buffer: output) {
raw.write(buffer.array(), buffer.arrayOffset() + buffer.position(),
buffer.remaining());
}
output.clear();
return true;
}
isSuppressed = false;
return false;
}
/**
     * Get the buffer as a protobuf ByteString and clear the BufferedStream.
* @return the bytes
*/
ByteString getBytes() {
int len = output.size();
if (len == 0) {
return ByteString.EMPTY;
} else {
ByteString result = ByteString.copyFrom(output.get(0));
for (int i=1; i < output.size(); ++i) {
result = result.concat(ByteString.copyFrom(output.get(i)));
}
output.clear();
return result;
}
}
/**
* Get the stream as a ByteBuffer and clear it.
* @return a single ByteBuffer with the contents of the stream
*/
ByteBuffer getByteBuffer() {
ByteBuffer result;
if (output.size() == 1) {
result = output.get(0);
} else {
result = ByteBuffer.allocate((int) getOutputSize());
for (ByteBuffer buffer : output) {
result.put(buffer);
}
output.clear();
result.flip();
}
return result;
}
/**
* Get the number of bytes that will be written to the output.
*
* Assumes the stream writing into this receiver has already been flushed.
* @return number of bytes
*/
public long getOutputSize() {
long result = 0;
for (ByteBuffer buffer: output) {
result += buffer.remaining();
}
return result;
}
}
static class SizeCounters {
long index = 0;
long data = 0;
long total() {
return index + data;
}
}
void buildStreamList(OrcProto.StripeFooter.Builder footerBuilder,
SizeCounters sizes
) throws IOException {
footerBuilder.addAllStreams(
unencrypted.placeStreams(StreamName.Area.INDEX, sizes));
final long unencryptedIndexSize = sizes.index;
int v = 0;
for (VariantTracker variant: variants.values()) {
OrcProto.StripeEncryptionVariant.Builder builder =
footerBuilder.getEncryptionBuilder(v++);
builder.addAllStreams(
variant.placeStreams(StreamName.Area.INDEX, sizes));
}
if (sizes.index != unencryptedIndexSize) {
// add a placeholder that covers the hole where the encrypted indexes are
footerBuilder.addStreams(OrcProto.Stream.newBuilder()
.setKind(OrcProto.Stream.Kind.ENCRYPTED_INDEX)
.setLength(sizes.index - unencryptedIndexSize));
}
footerBuilder.addAllStreams(
unencrypted.placeStreams(StreamName.Area.DATA, sizes));
final long unencryptedDataSize = sizes.data;
v = 0;
for (VariantTracker variant: variants.values()) {
OrcProto.StripeEncryptionVariant.Builder builder =
footerBuilder.getEncryptionBuilder(v++);
builder.addAllStreams(
variant.placeStreams(StreamName.Area.DATA, sizes));
}
if (sizes.data != unencryptedDataSize) {
      // add a placeholder that covers the hole where the encrypted data streams are
footerBuilder.addStreams(OrcProto.Stream.newBuilder()
.setKind(OrcProto.Stream.Kind.ENCRYPTED_DATA)
.setLength(sizes.data - unencryptedDataSize));
}
}
@Override
public void finalizeStripe(OrcProto.StripeFooter.Builder footerBuilder,
OrcProto.StripeInformation.Builder dirEntry
) throws IOException {
SizeCounters sizes = new SizeCounters();
buildStreamList(footerBuilder, sizes);
OrcProto.StripeFooter footer = footerBuilder.build();
// Do we need to pad the file so the stripe doesn't straddle a block boundary?
padStripe(sizes.total() + footer.getSerializedSize());
// write the unencrypted index streams
unencrypted.writeStreams(StreamName.Area.INDEX, rawWriter);
// write the encrypted index streams
for (VariantTracker variant: variants.values()) {
variant.writeStreams(StreamName.Area.INDEX, rawWriter);
}
// write the unencrypted data streams
unencrypted.writeStreams(StreamName.Area.DATA, rawWriter);
    // write the encrypted data streams
for (VariantTracker variant: variants.values()) {
variant.writeStreams(StreamName.Area.DATA, rawWriter);
}
// Write out the footer.
writeStripeFooter(footer, sizes, dirEntry);
// fill in the data sizes
dirEntry.setDataLength(sizes.data);
dirEntry.setIndexLength(sizes.index);
stripeNumber += 1;
}
@Override
public void writeHeader() throws IOException {
rawWriter.writeBytes(OrcFile.MAGIC);
headerLength = rawWriter.getPos();
}
@Override
public BufferedStream createDataStream(StreamName name) {
VariantTracker variant = getVariant(name.getEncryption());
BufferedStream result = variant.streams.get(name);
if (result == null) {
result = new BufferedStream();
variant.streams.put(name, result);
}
return result;
}
private StreamOptions getOptions(OrcProto.Stream.Kind kind) {
return SerializationUtils.getCustomizedCodec(compress, compressionStrategy,
kind);
}
protected OutputStream createIndexStream(StreamName name) {
BufferedStream buffer = createDataStream(name);
VariantTracker tracker = getVariant(name.getEncryption());
StreamOptions options =
SerializationUtils.getCustomizedCodec(tracker.options,
compressionStrategy, name.getKind());
if (options.isEncrypted()) {
if (options == tracker.options) {
options = new StreamOptions(options);
}
options.modifyIv(CryptoUtils.modifyIvForStream(name, stripeNumber + 1));
}
return new OutStream(name.toString(), options, buffer);
}
@Override
public void writeIndex(StreamName name,
OrcProto.RowIndex.Builder index
) throws IOException {
OutputStream stream = createIndexStream(name);
index.build().writeTo(stream);
stream.flush();
}
@Override
public void writeBloomFilter(StreamName name,
OrcProto.BloomFilterIndex.Builder bloom
) throws IOException {
OutputStream stream = createIndexStream(name);
bloom.build().writeTo(stream);
stream.flush();
}
@Override
public void writeStatistics(StreamName name,
OrcProto.ColumnStatistics.Builder statistics
) {
VariantTracker tracker = getVariant(name.getEncryption());
if (name.getKind() == OrcProto.Stream.Kind.FILE_STATISTICS) {
tracker.fileStats[name.getColumn() - tracker.rootColumn] =
statistics.build();
} else {
tracker.stripeStats[name.getColumn() - tracker.rootColumn]
.add(statistics.build());
}
}
@Override
public String toString() {
if (path != null) {
return path.toString();
} else {
return ByteString.EMPTY.toString();
}
}
}
| 28,577 | 35.638462 | 98 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/PositionProvider.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
/**
* An interface used for seeking to a row index.
*/
public interface PositionProvider {
long getNext();
}
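
/**
 * Illustrative sketch only, not part of the original ORC source: a minimal
 * PositionProvider backed by an array of positions, showing how consumers pull
 * successive values with getNext() while seeking. The class name and fields
 * are assumptions made for this example.
 */
class ArrayPositionProviderSketch implements PositionProvider {
  private final long[] positions;
  private int next = 0;

  ArrayPositionProviderSketch(long[] positions) {
    this.positions = positions;
  }

  @Override
  public long getNext() {
    // return the next recorded position and advance
    return positions[next++];
  }
}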
| 950 | 34.222222 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/PositionRecorder.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
/**
* An interface for recording positions in a stream.
*/
public interface PositionRecorder {
void addPosition(long offset);
}
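
/**
 * Illustrative sketch only, not part of the original ORC source: a recorder
 * that simply collects every offset it is given, e.g. for inspection in tests.
 * The class name is an assumption made for this example.
 */
class CollectingPositionRecorderSketch implements PositionRecorder {
  private final java.util.List<Long> positions = new java.util.ArrayList<>();

  @Override
  public void addPosition(long offset) {
    // remember each offset that the writer reports
    positions.add(offset);
  }

  java.util.List<Long> getPositions() {
    return positions;
  }
}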
| 968 | 36.269231 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/PositionedOutputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.IOException;
import java.io.OutputStream;
import java.util.function.Consumer;
public abstract class PositionedOutputStream extends OutputStream {
/**
* Record the current position to the recorder.
* @param recorder the object that receives the position
   * @throws IOException if an error occurs while recording the position
*/
public abstract void getPosition(PositionRecorder recorder
) throws IOException;
/**
   * Get the memory size currently allocated to the buffers associated with
   * this stream.
* @return the number of bytes used by buffers.
*/
public abstract long getBufferSize();
/**
* Change the current Initialization Vector (IV) for the encryption.
* Has no effect if the stream is not encrypted.
* @param modifier a function to modify the IV in place
*/
public abstract void changeIv(Consumer<byte[]> modifier);
}
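
/**
 * Illustrative sketch only, not part of the original ORC source: a trivial
 * PositionedOutputStream that counts bytes and reports that count as its
 * position. A real implementation would buffer, compress, and possibly
 * encrypt the bytes. The class name is an assumption made for this example.
 */
class CountingPositionedOutputStreamSketch extends PositionedOutputStream {
  private long bytesWritten = 0;

  @Override
  public void write(int b) throws IOException {
    bytesWritten += 1;
  }

  @Override
  public void getPosition(PositionRecorder recorder) throws IOException {
    // the "position" here is simply the number of bytes written so far
    recorder.addPosition(bytesWritten);
  }

  @Override
  public long getBufferSize() {
    // nothing is buffered in this sketch
    return 0;
  }

  @Override
  public void changeIv(Consumer<byte[]> modifier) {
    // no-op: this sketch does not encrypt
  }
}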
| 1,710 | 34.645833 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/ReaderImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.TextFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.hadoop.io.Text;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import org.apache.orc.DataMaskDescription;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.EncryptionKey;
import org.apache.orc.EncryptionVariant;
import org.apache.orc.FileFormatException;
import org.apache.orc.FileMetadata;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.StripeInformation;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.reader.ReaderEncryption;
import org.apache.orc.impl.reader.ReaderEncryptionVariant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.Key;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.function.Supplier;
public class ReaderImpl implements Reader {
private static final Logger LOG = LoggerFactory.getLogger(ReaderImpl.class);
private static final int DIRECTORY_SIZE_GUESS = 16 * 1024;
public static final int DEFAULT_COMPRESSION_BLOCK_SIZE = 256 * 1024;
private final long maxLength;
protected final Path path;
protected final OrcFile.ReaderOptions options;
protected final org.apache.orc.CompressionKind compressionKind;
protected FSDataInputStream file;
protected int bufferSize;
// the unencrypted stripe statistics or null if they haven't been read yet
protected List<OrcProto.StripeStatistics> stripeStatistics;
private final int metadataSize;
protected final List<OrcProto.Type> types;
private final TypeDescription schema;
private final List<OrcProto.UserMetadataItem> userMetadata;
private final List<OrcProto.ColumnStatistics> fileStats;
private final List<StripeInformation> stripes;
protected final int rowIndexStride;
private final long contentLength, numberOfRows;
private final ReaderEncryption encryption;
private long deserializedSize = -1;
protected final Configuration conf;
protected final boolean useUTCTimestamp;
private final List<Integer> versionList;
private final OrcFile.WriterVersion writerVersion;
private final String softwareVersion;
protected final OrcTail tail;
public static class StripeInformationImpl
implements StripeInformation {
private final long stripeId;
private final long originalStripeId;
private final byte[][] encryptedKeys;
private final OrcProto.StripeInformation stripe;
public StripeInformationImpl(OrcProto.StripeInformation stripe,
long stripeId,
long previousOriginalStripeId,
byte[][] previousKeys) {
this.stripe = stripe;
this.stripeId = stripeId;
if (stripe.hasEncryptStripeId()) {
originalStripeId = stripe.getEncryptStripeId();
} else {
originalStripeId = previousOriginalStripeId + 1;
}
if (stripe.getEncryptedLocalKeysCount() != 0) {
encryptedKeys = new byte[stripe.getEncryptedLocalKeysCount()][];
for(int v=0; v < encryptedKeys.length; ++v) {
encryptedKeys[v] = stripe.getEncryptedLocalKeys(v).toByteArray();
}
} else {
encryptedKeys = previousKeys;
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
StripeInformationImpl that = (StripeInformationImpl) o;
return stripeId == that.stripeId &&
originalStripeId == that.originalStripeId &&
Arrays.deepEquals(encryptedKeys, that.encryptedKeys) &&
stripe.equals(that.stripe);
}
@Override
public int hashCode() {
int result = Objects.hash(stripeId, originalStripeId, stripe);
result = 31 * result + Arrays.hashCode(encryptedKeys);
return result;
}
@Override
public long getOffset() {
return stripe.getOffset();
}
@Override
public long getLength() {
return stripe.getDataLength() + getIndexLength() + getFooterLength();
}
@Override
public long getDataLength() {
return stripe.getDataLength();
}
@Override
public long getFooterLength() {
return stripe.getFooterLength();
}
@Override
public long getIndexLength() {
return stripe.getIndexLength();
}
@Override
public long getNumberOfRows() {
return stripe.getNumberOfRows();
}
@Override
public long getStripeId() {
return stripeId;
}
@Override
public boolean hasEncryptionStripeId() {
return stripe.hasEncryptStripeId();
}
@Override
public long getEncryptionStripeId() {
return originalStripeId;
}
@Override
public byte[][] getEncryptedLocalKeys() {
return encryptedKeys;
}
@Override
public String toString() {
return "offset: " + getOffset() + " data: " +
getDataLength() + " rows: " + getNumberOfRows() + " tail: " +
getFooterLength() + " index: " + getIndexLength() +
(!hasEncryptionStripeId() || stripeId == originalStripeId - 1
? "" : " encryption id: " + originalStripeId);
}
}
@Override
public long getNumberOfRows() {
return numberOfRows;
}
@Override
public List<String> getMetadataKeys() {
List<String> result = new ArrayList<>();
for(OrcProto.UserMetadataItem item: userMetadata) {
result.add(item.getName());
}
return result;
}
@Override
public ByteBuffer getMetadataValue(String key) {
for(OrcProto.UserMetadataItem item: userMetadata) {
if (item.hasName() && item.getName().equals(key)) {
return item.getValue().asReadOnlyByteBuffer();
}
}
throw new IllegalArgumentException("Can't find user metadata " + key);
}
@Override
public boolean hasMetadataValue(String key) {
for(OrcProto.UserMetadataItem item: userMetadata) {
if (item.hasName() && item.getName().equals(key)) {
return true;
}
}
return false;
}
@Override
public org.apache.orc.CompressionKind getCompressionKind() {
return compressionKind;
}
@Override
public int getCompressionSize() {
return bufferSize;
}
@Override
public List<StripeInformation> getStripes() {
return stripes;
}
@Override
public long getContentLength() {
return contentLength;
}
@Override
public List<OrcProto.Type> getTypes() {
return OrcUtils.getOrcTypes(schema);
}
public static OrcFile.Version getFileVersion(List<Integer> versionList) {
if (versionList == null || versionList.isEmpty()) {
return OrcFile.Version.V_0_11;
}
for (OrcFile.Version version: OrcFile.Version.values()) {
if (version.getMajor() == versionList.get(0) &&
version.getMinor() == versionList.get(1)) {
return version;
}
}
return OrcFile.Version.FUTURE;
}
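  // Illustrative note, not in the original source: in getFileVersion(List)
  // above, a postscript version list of [0, 12] resolves to
  // OrcFile.Version.V_0_12, while a missing or empty list falls back to
  // OrcFile.Version.V_0_11.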
@Override
public OrcFile.Version getFileVersion() {
return getFileVersion(versionList);
}
@Override
public OrcFile.WriterVersion getWriterVersion() {
return writerVersion;
}
@Override
public String getSoftwareVersion() {
return softwareVersion;
}
@Override
public OrcProto.FileTail getFileTail() {
return tail.getFileTail();
}
@Override
public EncryptionKey[] getColumnEncryptionKeys() {
return encryption.getKeys();
}
@Override
public DataMaskDescription[] getDataMasks() {
return encryption.getMasks();
}
@Override
public ReaderEncryptionVariant[] getEncryptionVariants() {
return encryption.getVariants();
}
@Override
public List<StripeStatistics> getVariantStripeStatistics(EncryptionVariant variant)
throws IOException {
if (variant == null) {
if (stripeStatistics == null) {
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)) {
InStream.StreamOptions options = new InStream.StreamOptions();
if (codec != null) {
options.withCodec(codec).withBufferSize(bufferSize);
}
// deserialize the unencrypted stripe statistics
stripeStatistics = deserializeStripeStats(tail.getTailBuffer(),
tail.getMetadataOffset(), tail.getMetadataSize(), options);
}
}
return convertFromProto(stripeStatistics);
} else {
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)) {
InStream.StreamOptions compression = new InStream.StreamOptions();
if (codec != null) {
compression.withCodec(codec).withBufferSize(bufferSize);
}
return ((ReaderEncryptionVariant) variant).getStripeStatistics(null,
compression, this);
}
}
}
/**
* Internal access to our view of the encryption.
* @return the encryption information for this reader.
*/
public ReaderEncryption getEncryption() {
return encryption;
}
@Override
public int getRowIndexStride() {
return rowIndexStride;
}
@Override
public ColumnStatistics[] getStatistics() {
ColumnStatistics[] result = deserializeStats(schema, fileStats);
if (encryption.getKeys().length > 0) {
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)) {
InStream.StreamOptions compression = InStream.options();
if (codec != null) {
compression.withCodec(codec).withBufferSize(bufferSize);
}
for (int c = schema.getId(); c <= schema.getMaximumId(); ++c) {
ReaderEncryptionVariant variant = encryption.getVariant(c);
if (variant != null) {
try {
int base = variant.getRoot().getId();
ColumnStatistics[] overrides = decryptFileStats(variant,
compression, tail.getFooter());
for(int sub=0; sub < overrides.length; ++sub) {
result[base + sub] = overrides[sub];
}
} catch (IOException e) {
            throw new RuntimeException("Can't decrypt file stats for " + path +
                " with " + variant.getKeyDescription(), e);
}
}
}
}
}
return result;
}
private ColumnStatistics[] decryptFileStats(ReaderEncryptionVariant encryption,
InStream.StreamOptions compression,
OrcProto.Footer footer
) throws IOException {
Key key = encryption.getFileFooterKey();
if (key == null) {
return null;
} else {
OrcProto.EncryptionVariant protoVariant =
footer.getEncryption().getVariants(encryption.getVariantId());
byte[] bytes = protoVariant.getFileStatistics().toByteArray();
BufferChunk buffer = new BufferChunk(ByteBuffer.wrap(bytes), 0);
EncryptionAlgorithm algorithm = encryption.getKeyDescription().getAlgorithm();
byte[] iv = new byte[algorithm.getIvLength()];
CryptoUtils.modifyIvForStream(encryption.getRoot().getId(),
OrcProto.Stream.Kind.FILE_STATISTICS, footer.getStripesCount() + 1)
.accept(iv);
InStream.StreamOptions options = new InStream.StreamOptions(compression)
.withEncryption(algorithm, key, iv);
InStream in = InStream.create("encrypted file stats", buffer,
0, bytes.length, options);
OrcProto.FileStatistics decrypted = OrcProto.FileStatistics.parseFrom(in);
ColumnStatistics[] result = new ColumnStatistics[decrypted.getColumnCount()];
TypeDescription root = encryption.getRoot();
      for (int i = 0; i < result.length; ++i) {
result[i] = ColumnStatisticsImpl.deserialize(root.findSubtype(root.getId() + i),
decrypted.getColumn(i), writerUsedProlepticGregorian(),
getConvertToProlepticGregorian());
}
return result;
}
}
public ColumnStatistics[] deserializeStats(
TypeDescription schema,
List<OrcProto.ColumnStatistics> fileStats) {
ColumnStatistics[] result = new ColumnStatistics[fileStats.size()];
for(int i=0; i < result.length; ++i) {
TypeDescription subschema = schema == null ? null : schema.findSubtype(i);
result[i] = ColumnStatisticsImpl.deserialize(subschema, fileStats.get(i),
writerUsedProlepticGregorian(),
getConvertToProlepticGregorian());
}
return result;
}
@Override
public TypeDescription getSchema() {
return schema;
}
/**
* Ensure this is an ORC file to prevent users from trying to read text
* files or RC files as ORC files.
* @param in the file being read
* @param path the filename for error messages
* @param psLen the postscript length
* @param buffer the tail of the file
*/
protected static void ensureOrcFooter(FSDataInputStream in,
Path path,
int psLen,
ByteBuffer buffer) throws IOException {
int magicLength = OrcFile.MAGIC.length();
int fullLength = magicLength + 1;
if (psLen < fullLength || buffer.remaining() < fullLength) {
throw new FileFormatException("Malformed ORC file " + path +
". Invalid postscript length " + psLen);
}
int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - fullLength;
byte[] array = buffer.array();
// now look for the magic string at the end of the postscript.
if (!Text.decode(array, offset, magicLength).equals(OrcFile.MAGIC)) {
// If it isn't there, this may be the 0.11.0 version of ORC.
// Read the first 3 bytes of the file to check for the header
byte[] header = new byte[magicLength];
in.readFully(0, header, 0, magicLength);
// if it isn't there, this isn't an ORC file
      if (!Text.decode(header, 0, magicLength).equals(OrcFile.MAGIC)) {
throw new FileFormatException("Malformed ORC file " + path +
". Invalid postscript.");
}
}
}
/**
* Ensure this is an ORC file to prevent users from trying to read text
* files or RC files as ORC files.
* @param psLen the postscript length
* @param buffer the tail of the file
* @deprecated Use {@link ReaderImpl#ensureOrcFooter(FSDataInputStream, Path, int, ByteBuffer)} instead.
*/
protected static void ensureOrcFooter(ByteBuffer buffer, int psLen) throws IOException {
int magicLength = OrcFile.MAGIC.length();
int fullLength = magicLength + 1;
if (psLen < fullLength || buffer.remaining() < fullLength) {
throw new FileFormatException("Malformed ORC file. Invalid postscript length " + psLen);
}
int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - fullLength;
byte[] array = buffer.array();
// now look for the magic string at the end of the postscript.
if (!Text.decode(array, offset, magicLength).equals(OrcFile.MAGIC)) {
      // If it isn't there, this may be the 0.11.0 version of ORC.
      // Read the first 3 bytes from the buffer to check for the header.
if (!Text.decode(buffer.array(), 0, magicLength).equals(OrcFile.MAGIC)) {
throw new FileFormatException("Malformed ORC file. Invalid postscript length " + psLen);
}
}
}
/**
   * Build a version string out of the version list.
* @param version the version number as a list
* @return the human readable form of the version string
*/
private static String versionString(List<Integer> version) {
StringBuilder buffer = new StringBuilder();
for(int i=0; i < version.size(); ++i) {
if (i != 0) {
buffer.append('.');
}
buffer.append(version.get(i));
}
return buffer.toString();
}
/**
* Check to see if this ORC file is from a future version and if so,
   * fail, since we may not be able to read all of the column encodings.
* @param path the data source path for error messages
* @param postscript the parsed postscript
*/
protected static void checkOrcVersion(Path path,
OrcProto.PostScript postscript
) throws IOException {
List<Integer> version = postscript.getVersionList();
if (getFileVersion(version) == OrcFile.Version.FUTURE) {
throw new IOException(path + " was written by a future ORC version " +
versionString(version) + ". This file is not readable by this version of ORC.\n"+
"Postscript: " + TextFormat.shortDebugString(postscript));
}
}
/**
   * Constructor that lets the user specify additional options.
* @param path pathname for file
* @param options options for reading
*/
public ReaderImpl(Path path, OrcFile.ReaderOptions options) throws IOException {
this.path = path;
this.options = options;
this.conf = options.getConfiguration();
this.maxLength = options.getMaxLength();
this.useUTCTimestamp = options.getUseUTCTimestamp();
FileMetadata fileMetadata = options.getFileMetadata();
if (fileMetadata != null) {
this.compressionKind = fileMetadata.getCompressionKind();
this.bufferSize = fileMetadata.getCompressionBufferSize();
this.metadataSize = fileMetadata.getMetadataSize();
this.stripeStatistics = fileMetadata.getStripeStats();
this.versionList = fileMetadata.getVersionList();
OrcFile.WriterImplementation writer =
OrcFile.WriterImplementation.from(fileMetadata.getWriterImplementation());
this.writerVersion =
OrcFile.WriterVersion.from(writer, fileMetadata.getWriterVersionNum());
List<OrcProto.Type> types = fileMetadata.getTypes();
OrcUtils.isValidTypeTree(types, 0);
this.schema = OrcUtils.convertTypeFromProtobuf(types, 0);
this.rowIndexStride = fileMetadata.getRowIndexStride();
this.contentLength = fileMetadata.getContentLength();
this.numberOfRows = fileMetadata.getNumberOfRows();
this.fileStats = fileMetadata.getFileStats();
this.stripes = fileMetadata.getStripes();
this.tail = null;
this.userMetadata = null; // not cached and not needed here
// FileMetadata is obsolete and doesn't support encryption
this.encryption = new ReaderEncryption();
this.softwareVersion = null;
} else {
OrcTail orcTail = options.getOrcTail();
if (orcTail == null) {
tail = extractFileTail(getFileSystem(), path, options.getMaxLength());
options.orcTail(tail);
} else {
checkOrcVersion(path, orcTail.getPostScript());
tail = orcTail;
}
this.compressionKind = tail.getCompressionKind();
this.bufferSize = tail.getCompressionBufferSize();
this.metadataSize = tail.getMetadataSize();
this.versionList = tail.getPostScript().getVersionList();
this.schema = tail.getSchema();
this.rowIndexStride = tail.getFooter().getRowIndexStride();
this.contentLength = tail.getFooter().getContentLength();
this.numberOfRows = tail.getFooter().getNumberOfRows();
this.userMetadata = tail.getFooter().getMetadataList();
this.fileStats = tail.getFooter().getStatisticsList();
this.writerVersion = tail.getWriterVersion();
this.stripes = tail.getStripes();
this.stripeStatistics = null;
OrcProto.Footer footer = tail.getFooter();
this.encryption = new ReaderEncryption(footer, schema,
tail.getStripeStatisticsOffset(), tail.getTailBuffer(), stripes,
options.getKeyProvider(), conf);
this.softwareVersion = OrcUtils.getSoftwareVersion(footer.getWriter(),
footer.getSoftwareVersion());
}
this.types = OrcUtils.getOrcTypes(schema);
}
protected FileSystem getFileSystem() throws IOException {
FileSystem fileSystem = options.getFilesystem();
if (fileSystem == null) {
fileSystem = path.getFileSystem(options.getConfiguration());
options.filesystem(fileSystem);
}
return fileSystem;
}
protected Supplier<FileSystem> getFileSystemSupplier() {
return () -> {
try {
return getFileSystem();
} catch (IOException e) {
throw new RuntimeException("Can't create filesystem", e);
}
};
}
/**
* Get the WriterVersion based on the ORC file postscript.
* @param writerVersion the integer writer version
* @return the version of the software that produced the file
*/
public static OrcFile.WriterVersion getWriterVersion(int writerVersion) {
for(OrcFile.WriterVersion version: OrcFile.WriterVersion.values()) {
if (version.getId() == writerVersion) {
return version;
}
}
return OrcFile.WriterVersion.FUTURE;
}
public static OrcProto.Metadata extractMetadata(ByteBuffer bb, int metadataAbsPos,
int metadataSize, InStream.StreamOptions options) throws IOException {
bb.position(metadataAbsPos);
bb.limit(metadataAbsPos + metadataSize);
return OrcProto.Metadata.parseFrom(InStream.createCodedInputStream(
InStream.create("metadata", new BufferChunk(bb, 0), 0, metadataSize, options)));
}
private static OrcProto.PostScript extractPostScript(BufferChunk buffer,
Path path,
int psLen,
long psOffset
) throws IOException {
CodedInputStream in = InStream.createCodedInputStream(
InStream.create("ps", buffer, psOffset, psLen));
OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in);
checkOrcVersion(path, ps);
// Check compression codec.
switch (ps.getCompression()) {
case NONE:
case ZLIB:
case SNAPPY:
case LZO:
case LZ4:
case ZSTD:
break;
default:
throw new IllegalArgumentException("Unknown compression");
}
return ps;
}
/**
* Build a virtual OrcTail for empty files.
* @return a new OrcTail
*/
OrcTail buildEmptyTail() throws IOException {
OrcProto.PostScript.Builder postscript = OrcProto.PostScript.newBuilder();
OrcFile.Version version = OrcFile.Version.CURRENT;
postscript.setMagic(OrcFile.MAGIC)
.setCompression(OrcProto.CompressionKind.NONE)
.setFooterLength(0)
.addVersion(version.getMajor())
.addVersion(version.getMinor())
.setMetadataLength(0)
.setWriterVersion(OrcFile.CURRENT_WRITER.getId());
// Use a struct with no fields
OrcProto.Type.Builder struct = OrcProto.Type.newBuilder();
struct.setKind(OrcProto.Type.Kind.STRUCT);
OrcProto.Footer.Builder footer = OrcProto.Footer.newBuilder();
footer.setHeaderLength(0)
.setContentLength(0)
.addTypes(struct)
.setNumberOfRows(0)
.setRowIndexStride(0);
OrcProto.FileTail.Builder result = OrcProto.FileTail.newBuilder();
result.setFooter(footer);
result.setPostscript(postscript);
result.setFileLength(0);
result.setPostscriptLength(0);
return new OrcTail(result.build(), new BufferChunk(0, 0), -1, this);
}
private static void read(FSDataInputStream file,
BufferChunk chunks) throws IOException {
while (chunks != null) {
if (!chunks.hasData()) {
int len = chunks.getLength();
ByteBuffer bb = ByteBuffer.allocate(len);
file.readFully(chunks.getOffset(), bb.array(), bb.arrayOffset(), len);
chunks.setChunk(bb);
}
chunks = (BufferChunk) chunks.next;
}
}
/**
* @deprecated Use {@link ReaderImpl#extractFileTail(FileSystem, Path, long)} instead.
* This is for backward compatibility.
*/
public static OrcTail extractFileTail(ByteBuffer buffer)
throws IOException {
    return extractFileTail(buffer, -1, -1);
}
/**
* Read compression block size from the postscript if it is set; otherwise,
* use the same 256k default the C++ implementation uses.
*/
public static int getCompressionBlockSize(OrcProto.PostScript postScript) {
if (postScript.hasCompressionBlockSize()) {
return (int) postScript.getCompressionBlockSize();
} else {
return DEFAULT_COMPRESSION_BLOCK_SIZE;
}
}
/**
* @deprecated Use {@link ReaderImpl#extractFileTail(FileSystem, Path, long)} instead.
* This is for backward compatibility.
*/
public static OrcTail extractFileTail(ByteBuffer buffer, long fileLen, long modificationTime)
throws IOException {
OrcProto.PostScript ps;
long readSize = buffer.limit();
OrcProto.FileTail.Builder fileTailBuilder = OrcProto.FileTail.newBuilder();
fileTailBuilder.setFileLength(fileLen != -1 ? fileLen : readSize);
int psLen = buffer.get((int) (readSize - 1)) & 0xff;
int psOffset = (int) (readSize - 1 - psLen);
ensureOrcFooter(buffer, psLen);
byte[] psBuffer = new byte[psLen];
System.arraycopy(buffer.array(), psOffset, psBuffer, 0, psLen);
ps = OrcProto.PostScript.parseFrom(psBuffer);
int footerSize = (int) ps.getFooterLength();
CompressionKind compressionKind =
CompressionKind.valueOf(ps.getCompression().name());
fileTailBuilder.setPostscriptLength(psLen).setPostscript(ps);
InStream.StreamOptions compression = new InStream.StreamOptions();
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)){
if (codec != null) {
compression.withCodec(codec)
.withBufferSize(getCompressionBlockSize(ps));
}
OrcProto.Footer footer =
OrcProto.Footer.parseFrom(
InStream.createCodedInputStream(
InStream.create("footer", new BufferChunk(buffer, 0),
psOffset - footerSize, footerSize, compression)));
fileTailBuilder.setPostscriptLength(psLen).setFooter(footer);
}
// clear does not clear the contents but sets position to 0 and limit = capacity
buffer.clear();
return new OrcTail(fileTailBuilder.build(),
new BufferChunk(buffer.slice(), 0), modificationTime);
}
protected OrcTail extractFileTail(FileSystem fs, Path path,
long maxFileLength) throws IOException {
BufferChunk buffer;
OrcProto.PostScript ps;
OrcProto.FileTail.Builder fileTailBuilder = OrcProto.FileTail.newBuilder();
long modificationTime;
file = fs.open(path);
try {
// figure out the size of the file using the option or filesystem
long size;
if (maxFileLength == Long.MAX_VALUE) {
FileStatus fileStatus = fs.getFileStatus(path);
size = fileStatus.getLen();
modificationTime = fileStatus.getModificationTime();
} else {
size = maxFileLength;
modificationTime = -1;
}
if (size == 0) {
// Hive often creates empty files (including ORC) and has an
// optimization to create a 0 byte file as an empty ORC file.
return buildEmptyTail();
} else if (size <= OrcFile.MAGIC.length()) {
// Anything smaller than MAGIC header cannot be valid (valid ORC files
// are actually around 40 bytes, this is more conservative)
throw new FileFormatException("Not a valid ORC file " + path
+ " (maxFileLength= " + maxFileLength + ")");
}
fileTailBuilder.setFileLength(size);
//read last bytes into buffer to get PostScript
int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
buffer = new BufferChunk(size - readSize, readSize);
read(file, buffer);
//read the PostScript
//get length of PostScript
ByteBuffer bb = buffer.getData();
int psLen = bb.get(readSize - 1) & 0xff;
ensureOrcFooter(file, path, psLen, bb);
long psOffset = size - 1 - psLen;
ps = extractPostScript(buffer, path, psLen, psOffset);
CompressionKind compressionKind =
CompressionKind.valueOf(ps.getCompression().name());
fileTailBuilder.setPostscriptLength(psLen).setPostscript(ps);
int footerSize = (int) ps.getFooterLength();
int metadataSize = (int) ps.getMetadataLength();
int stripeStatSize = (int) ps.getStripeStatisticsLength();
//check if extra bytes need to be read
int tailSize = 1 + psLen + footerSize + metadataSize + stripeStatSize;
int extra = Math.max(0, tailSize - readSize);
if (extra > 0) {
//more bytes need to be read, seek back to the right place and read extra bytes
BufferChunk orig = buffer;
buffer = new BufferChunk(size - tailSize, extra);
buffer.next = orig;
orig.prev = buffer;
read(file, buffer);
}
InStream.StreamOptions compression = new InStream.StreamOptions();
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)) {
if (codec != null) {
compression.withCodec(codec)
.withBufferSize(getCompressionBlockSize(ps));
}
OrcProto.Footer footer =
OrcProto.Footer.parseFrom(
InStream.createCodedInputStream(
InStream.create("footer", buffer, psOffset - footerSize,
footerSize, compression)));
fileTailBuilder.setFooter(footer);
}
} catch (Throwable thr) {
try {
close();
} catch (IOException except) {
LOG.info("Ignoring secondary exception in close of " + path, except);
}
throw thr instanceof IOException ? (IOException) thr :
new IOException("Problem reading file footer " + path, thr);
}
return new OrcTail(fileTailBuilder.build(), buffer, modificationTime, this);
}
@Override
public ByteBuffer getSerializedFileFooter() {
return tail.getSerializedTail();
}
@Override
public boolean writerUsedProlepticGregorian() {
OrcProto.Footer footer = tail.getFooter();
return footer.hasCalendar()
? footer.getCalendar() == OrcProto.CalendarKind.PROLEPTIC_GREGORIAN
: OrcConf.PROLEPTIC_GREGORIAN_DEFAULT.getBoolean(conf);
}
@Override
public boolean getConvertToProlepticGregorian() {
return options.getConvertToProlepticGregorian();
}
@Override
public Options options() {
return new Options(conf);
}
@Override
public RecordReader rows() throws IOException {
return rows(options());
}
@Override
public RecordReader rows(Options options) throws IOException {
LOG.info("Reading ORC rows from " + path + " with " + options);
return new RecordReaderImpl(this, options);
}
@Override
public long getRawDataSize() {
    // If the deserialized size has not been computed yet, compute it;
    // otherwise return the cached value. Since we read the statistics from
    // the footer, we don't have to recompute the deserialized size repeatedly.
if (deserializedSize == -1) {
List<Integer> indices = new ArrayList<>();
for (int i = 0; i < fileStats.size(); ++i) {
indices.add(i);
}
deserializedSize = getRawDataSizeFromColIndices(indices);
}
return deserializedSize;
}
@Override
public long getRawDataSizeFromColIndices(List<Integer> colIndices) {
boolean[] include = new boolean[schema.getMaximumId() + 1];
for(Integer rootId: colIndices) {
TypeDescription root = schema.findSubtype(rootId);
for(int c = root.getId(); c <= root.getMaximumId(); ++c) {
include[c] = true;
}
}
return getRawDataSizeFromColIndices(include, schema, fileStats);
}
public static long getRawDataSizeFromColIndices(
List<Integer> colIndices,
List<OrcProto.Type> types,
List<OrcProto.ColumnStatistics> stats)
throws FileFormatException {
TypeDescription schema = OrcUtils.convertTypeFromProtobuf(types, 0);
boolean[] include = new boolean[schema.getMaximumId() + 1];
for(Integer rootId: colIndices) {
TypeDescription root = schema.findSubtype(rootId);
for(int c = root.getId(); c <= root.getMaximumId(); ++c) {
include[c] = true;
}
}
return getRawDataSizeFromColIndices(include, schema, stats);
}
static long getRawDataSizeFromColIndices(boolean[] include,
TypeDescription schema,
List<OrcProto.ColumnStatistics> stats) {
long result = 0;
for (int c = schema.getId(); c <= schema.getMaximumId(); ++c) {
if (include[c]) {
result += getRawDataSizeOfColumn(schema.findSubtype(c), stats);
}
}
return result;
}
private static long getRawDataSizeOfColumn(TypeDescription column,
List<OrcProto.ColumnStatistics> stats) {
OrcProto.ColumnStatistics colStat = stats.get(column.getId());
long numVals = colStat.getNumberOfValues();
switch (column.getCategory()) {
case BINARY:
      // The old ORC format doesn't support binary statistics. Checking for
      // binary statistics is not required because protocol buffers returns a
      // default value when they are absent.
return colStat.getBinaryStatistics().getSum();
case STRING:
case CHAR:
case VARCHAR:
      // The old ORC format doesn't support a sum for string statistics.
      // Checking for its existence is not required because protocol buffers
      // returns a default value when it is absent. ORC strings are
      // deserialized to Java strings, so use the Java data model's string size.
numVals = numVals == 0 ? 1 : numVals;
int avgStrLen = (int) (colStat.getStringStatistics().getSum() / numVals);
return numVals * JavaDataModel.get().lengthForStringOfLength(avgStrLen);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return numVals * JavaDataModel.get().lengthOfTimestamp();
case DATE:
return numVals * JavaDataModel.get().lengthOfDate();
case DECIMAL:
return numVals * JavaDataModel.get().lengthOfDecimal();
case DOUBLE:
case LONG:
return numVals * JavaDataModel.get().primitive2();
case FLOAT:
case INT:
case SHORT:
case BOOLEAN:
case BYTE:
case STRUCT:
case UNION:
case MAP:
case LIST:
return numVals * JavaDataModel.get().primitive1();
default:
LOG.debug("Unknown primitive category: {}", column.getCategory());
break;
}
return 0;
}
@Override
public long getRawDataSizeOfColumns(List<String> colNames) {
boolean[] include = new boolean[schema.getMaximumId() + 1];
for(String name: colNames) {
TypeDescription sub = schema.findSubtype(name);
for(int c = sub.getId(); c <= sub.getMaximumId(); ++c) {
include[c] = true;
}
}
return getRawDataSizeFromColIndices(include, schema, fileStats);
}
@Override
public List<OrcProto.StripeStatistics> getOrcProtoStripeStatistics() {
if (stripeStatistics == null) {
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)) {
InStream.StreamOptions options = new InStream.StreamOptions();
if (codec != null) {
options.withCodec(codec).withBufferSize(bufferSize);
}
stripeStatistics = deserializeStripeStats(tail.getTailBuffer(),
tail.getMetadataOffset(), tail.getMetadataSize(), options);
} catch (IOException ioe) {
throw new RuntimeException("Can't deserialize stripe stats", ioe);
}
}
return stripeStatistics;
}
@Override
public List<OrcProto.ColumnStatistics> getOrcProtoFileStatistics() {
return fileStats;
}
private static List<OrcProto.StripeStatistics> deserializeStripeStats(
BufferChunk tailBuffer,
long offset,
int length,
InStream.StreamOptions options) throws IOException {
try (InStream stream = InStream.create("stripe stats", tailBuffer, offset,
length, options)) {
OrcProto.Metadata meta = OrcProto.Metadata.parseFrom(
InStream.createCodedInputStream(stream));
return meta.getStripeStatsList();
} catch (InvalidProtocolBufferException e) {
LOG.warn("Failed to parse stripe statistics", e);
return Collections.emptyList();
}
}
private List<StripeStatistics> convertFromProto(List<OrcProto.StripeStatistics> list) {
if (list == null) {
return null;
} else {
List<StripeStatistics> result = new ArrayList<>(list.size());
      for (OrcProto.StripeStatistics ss : list) {
result.add(new StripeStatisticsImpl(schema,
new ArrayList<>(ss.getColStatsList()), writerUsedProlepticGregorian(),
getConvertToProlepticGregorian()));
}
return result;
}
}
@Override
public List<StripeStatistics> getStripeStatistics() throws IOException {
return getStripeStatistics(null);
}
@Override
public List<StripeStatistics> getStripeStatistics(boolean[] included) throws IOException {
List<StripeStatistics> result = convertFromProto(stripeStatistics);
if (result == null || encryption.getVariants().length > 0) {
try (CompressionCodec codec = OrcCodecPool.getCodec(compressionKind)) {
InStream.StreamOptions options = new InStream.StreamOptions();
if (codec != null) {
options.withCodec(codec).withBufferSize(bufferSize);
}
result = getVariantStripeStatistics(null);
if (encryption.getVariants().length > 0) {
// process any encrypted overrides that we have the key for
for (int c = schema.getId(); c <= schema.getMaximumId(); ++c) {
// only decrypt the variants that we need
if (included == null || included[c]) {
ReaderEncryptionVariant variant = encryption.getVariant(c);
if (variant != null) {
TypeDescription variantType = variant.getRoot();
List<StripeStatistics> colStats =
variant.getStripeStatistics(included, options, this);
for(int sub = c; sub <= variantType.getMaximumId(); ++sub) {
if (included == null || included[sub]) {
for(int s = 0; s < colStats.size(); ++s) {
StripeStatisticsImpl resultElem = (StripeStatisticsImpl) result.get(s);
resultElem.updateColumn(sub,
colStats.get(s).getColumn(sub - variantType.getId()));
}
}
}
c = variantType.getMaximumId();
}
}
}
}
}
}
return result;
}
@Override
public List<Integer> getVersionList() {
return versionList;
}
@Override
public int getMetadataSize() {
return metadataSize;
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("ORC Reader(");
buffer.append(path);
if (maxLength != -1) {
buffer.append(", ");
buffer.append(maxLength);
}
buffer.append(")");
return buffer.toString();
}
@Override
public void close() throws IOException {
if (file != null) {
file.close();
}
}
/**
* Take the file from the reader.
* This allows the first RecordReader to use the same file, but additional
* RecordReaders will open new handles.
* @return a file handle, if one is available
*/
public FSDataInputStream takeFile() {
FSDataInputStream result = file;
file = null;
return result;
}
}
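
/**
 * Illustrative usage sketch only, not part of the original ORC source: shows
 * how a reader is normally obtained through the public OrcFile factory and how
 * rows are pulled in batches. The class name, method name, and parameters are
 * assumptions made for this example.
 */
class ReaderUsageSketch {
  static long countRows(Configuration conf, Path path) throws IOException {
    long rowCount = 0;
    Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
    try {
      TypeDescription schema = reader.getSchema();
      org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch batch =
          schema.createRowBatch();
      RecordReader rows = reader.rows();
      try {
        while (rows.nextBatch(batch)) {
          // each call fills the batch with at most its capacity
          // (1024 rows by default)
          rowCount += batch.size;
        }
      } finally {
        rows.close();
      }
    } finally {
      reader.close();
    }
    return rowCount;
  }
}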
| 41,114 | 34.65915 | 106 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RecordReaderImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
import org.apache.hadoop.hive.ql.util.TimestampUtils;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.BooleanColumnStatistics;
import org.apache.orc.CollectionColumnStatistics;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionCodec;
import org.apache.orc.DataReader;
import org.apache.orc.DateColumnStatistics;
import org.apache.orc.DecimalColumnStatistics;
import org.apache.orc.DoubleColumnStatistics;
import org.apache.orc.IntegerColumnStatistics;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.OrcProto;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.StringColumnStatistics;
import org.apache.orc.StripeInformation;
import org.apache.orc.TimestampColumnStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.filter.BatchFilter;
import org.apache.orc.impl.filter.FilterFactory;
import org.apache.orc.impl.reader.ReaderEncryption;
import org.apache.orc.impl.reader.StripePlanner;
import org.apache.orc.impl.reader.tree.BatchReader;
import org.apache.orc.impl.reader.tree.TypeReader;
import org.apache.orc.util.BloomFilter;
import org.apache.orc.util.BloomFilterIO;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneOffset;
import java.time.chrono.ChronoLocalDate;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.SortedSet;
import java.util.TimeZone;
import java.util.TreeSet;
import java.util.function.Consumer;
public class RecordReaderImpl implements RecordReader {
static final Logger LOG = LoggerFactory.getLogger(RecordReaderImpl.class);
private static final boolean isLogDebugEnabled = LOG.isDebugEnabled();
// as public for use with test cases
public static final OrcProto.ColumnStatistics EMPTY_COLUMN_STATISTICS =
OrcProto.ColumnStatistics.newBuilder().setNumberOfValues(0)
.setHasNull(false)
.setBytesOnDisk(0)
.build();
protected final Path path;
private final long firstRow;
private final List<StripeInformation> stripes = new ArrayList<>();
private OrcProto.StripeFooter stripeFooter;
private final long totalRowCount;
protected final TypeDescription schema;
// the file included columns indexed by the file's column ids.
private final boolean[] fileIncluded;
private final long rowIndexStride;
private long rowInStripe = 0;
// position of the follow reader within the stripe
private long followRowInStripe = 0;
private int currentStripe = -1;
private long rowBaseInStripe = 0;
private long rowCountInStripe = 0;
private final BatchReader reader;
private final OrcIndex indexes;
// identifies the columns requiring row indexes
private final boolean[] rowIndexColsToRead;
private final SargApplier sargApp;
  // an array recording which row groups are not skipped
private boolean[] includedRowGroups = null;
private final DataReader dataReader;
private final int maxDiskRangeChunkLimit;
private final StripePlanner planner;
  // identifies the type of read: ALL (read everything) or LEADERS (read only the filter columns)
private final TypeReader.ReadPhase startReadPhase;
  // indicates that the bytes of the follow columns still need to be read
private boolean needsFollowColumnsRead;
private final boolean noSelectedVector;
// identifies whether the file has bad bloom filters that we should not use.
private final boolean skipBloomFilters;
static final String[] BAD_CPP_BLOOM_FILTER_VERSIONS = {
"1.6.0", "1.6.1", "1.6.2", "1.6.3", "1.6.4", "1.6.5", "1.6.6", "1.6.7", "1.6.8",
"1.6.9", "1.6.10", "1.6.11", "1.7.0"};
/**
   * Given a column name, find the matching file column and return its index.
*
* @param evolution the mapping from reader to file schema
* @param columnName the fully qualified column name to look for
* @return the file column number or -1 if the column wasn't found in the file schema
* @throws IllegalArgumentException if the column was not found in the reader schema
*/
static int findColumns(SchemaEvolution evolution,
String columnName) {
TypeDescription fileColumn = findColumnType(evolution, columnName);
return fileColumn == null ? -1 : fileColumn.getId();
}
static TypeDescription findColumnType(SchemaEvolution evolution, String columnName) {
try {
TypeDescription readerColumn = evolution.getReaderBaseSchema().findSubtype(
columnName, evolution.isSchemaEvolutionCaseAware);
return evolution.getFileType(readerColumn);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Filter could not find column with name: " +
columnName + " on " + evolution.getReaderBaseSchema(),
e);
}
}
/**
   * Given a column name such as 'a.b.c', this method returns the column 'a.b.c' if it is present
   * in the file. If 'a.b.c' is not found in the file, it looks for 'a.b', then 'a'. If none of
   * these are present, it returns null.
*
* @param evolution the mapping from reader to file schema
* @param columnName the fully qualified column name to look for
* @return the file column type or null in case none of the branch columns are present in the file
* @throws IllegalArgumentException if the column was not found in the reader schema
*/
static TypeDescription findMostCommonColumn(SchemaEvolution evolution, String columnName) {
try {
TypeDescription readerColumn = evolution.getReaderBaseSchema().findSubtype(
columnName, evolution.isSchemaEvolutionCaseAware);
TypeDescription fileColumn;
do {
fileColumn = evolution.getFileType(readerColumn);
if (fileColumn == null) {
readerColumn = readerColumn.getParent();
} else {
return fileColumn;
}
} while (readerColumn != null);
return null;
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Filter could not find column with name: " +
columnName + " on " + evolution.getReaderBaseSchema(),
e);
}
}
/**
* Find the mapping from predicate leaves to columns.
* @param sargLeaves the search argument that we need to map
* @param evolution the mapping from reader to file schema
* @return an array mapping the sarg leaves to concrete column numbers in the
* file
*/
public static int[] mapSargColumnsToOrcInternalColIdx(
List<PredicateLeaf> sargLeaves,
SchemaEvolution evolution) {
int[] result = new int[sargLeaves.size()];
for (int i = 0; i < sargLeaves.size(); ++i) {
int colNum = -1;
try {
String colName = sargLeaves.get(i).getColumnName();
colNum = findColumns(evolution, colName);
} catch (IllegalArgumentException e) {
LOG.debug("{}", e.getMessage());
}
result[i] = colNum;
}
return result;
}
protected RecordReaderImpl(ReaderImpl fileReader,
Reader.Options options) throws IOException {
OrcFile.WriterVersion writerVersion = fileReader.getWriterVersion();
SchemaEvolution evolution;
if (options.getSchema() == null) {
LOG.info("Reader schema not provided -- using file schema " +
fileReader.getSchema());
evolution = new SchemaEvolution(fileReader.getSchema(), null, options);
} else {
      // Now that we are creating a record reader for a file, validate that
      // the schema to read is compatible with the file schema.
evolution = new SchemaEvolution(fileReader.getSchema(),
options.getSchema(),
options);
if (LOG.isDebugEnabled() && evolution.hasConversion()) {
LOG.debug("ORC file " + fileReader.path.toString() +
" has data type conversion --\n" +
"reader schema: " + options.getSchema().toString() + "\n" +
"file schema: " + fileReader.getSchema());
}
}
this.noSelectedVector = !options.useSelected();
LOG.debug("noSelectedVector={}", this.noSelectedVector);
this.schema = evolution.getReaderSchema();
this.path = fileReader.path;
this.rowIndexStride = fileReader.rowIndexStride;
boolean ignoreNonUtf8BloomFilter =
OrcConf.IGNORE_NON_UTF8_BLOOM_FILTERS.getBoolean(fileReader.conf);
ReaderEncryption encryption = fileReader.getEncryption();
this.fileIncluded = evolution.getFileIncluded();
SearchArgument sarg = options.getSearchArgument();
boolean[] rowIndexCols = new boolean[evolution.getFileIncluded().length];
if (sarg != null && rowIndexStride > 0) {
sargApp = new SargApplier(sarg,
rowIndexStride,
evolution,
writerVersion,
fileReader.useUTCTimestamp,
fileReader.writerUsedProlepticGregorian(),
fileReader.options.getConvertToProlepticGregorian());
sargApp.setRowIndexCols(rowIndexCols);
} else {
sargApp = null;
}
long rows = 0;
long skippedRows = 0;
long offset = options.getOffset();
long maxOffset = options.getMaxOffset();
for(StripeInformation stripe: fileReader.getStripes()) {
long stripeStart = stripe.getOffset();
if (offset > stripeStart) {
skippedRows += stripe.getNumberOfRows();
} else if (stripeStart < maxOffset) {
this.stripes.add(stripe);
rows += stripe.getNumberOfRows();
}
}
this.maxDiskRangeChunkLimit = OrcConf.ORC_MAX_DISK_RANGE_CHUNK_LIMIT.getInt(fileReader.conf);
Boolean zeroCopy = options.getUseZeroCopy();
if (zeroCopy == null) {
zeroCopy = OrcConf.USE_ZEROCOPY.getBoolean(fileReader.conf);
}
if (options.getDataReader() != null) {
this.dataReader = options.getDataReader().clone();
} else {
InStream.StreamOptions unencryptedOptions =
InStream.options()
.withCodec(OrcCodecPool.getCodec(fileReader.getCompressionKind()))
.withBufferSize(fileReader.getCompressionSize());
DataReaderProperties.Builder builder =
DataReaderProperties.builder()
.withCompression(unencryptedOptions)
.withFileSystemSupplier(fileReader.getFileSystemSupplier())
.withPath(fileReader.path)
.withMaxDiskRangeChunkLimit(maxDiskRangeChunkLimit)
.withZeroCopy(zeroCopy)
.withMinSeekSize(options.minSeekSize())
.withMinSeekSizeTolerance(options.minSeekSizeTolerance());
FSDataInputStream file = fileReader.takeFile();
if (file != null) {
builder.withFile(file);
}
this.dataReader = RecordReaderUtils.createDefaultDataReader(
builder.build());
}
firstRow = skippedRows;
totalRowCount = rows;
Boolean skipCorrupt = options.getSkipCorruptRecords();
if (skipCorrupt == null) {
skipCorrupt = OrcConf.SKIP_CORRUPT_DATA.getBoolean(fileReader.conf);
}
String[] filterCols = null;
Consumer<OrcFilterContext> filterCallBack = null;
String filePath = options.allowPluginFilters() ?
fileReader.getFileSystem().makeQualified(fileReader.path).toString() : null;
BatchFilter filter = FilterFactory.createBatchFilter(options,
evolution.getReaderBaseSchema(),
evolution.isSchemaEvolutionCaseAware(),
fileReader.getFileVersion(),
false,
filePath,
fileReader.conf);
if (filter != null) {
// If a filter is determined then use this
filterCallBack = filter;
filterCols = filter.getColumnNames();
}
// Map columnNames to ColumnIds
SortedSet<Integer> filterColIds = new TreeSet<>();
if (filterCols != null) {
for (String colName : filterCols) {
TypeDescription expandCol = findColumnType(evolution, colName);
// If the column is not present in the file then this can be ignored from read.
if (expandCol == null || expandCol.getId() == -1) {
// Add -1 to filter columns so that the NullTreeReader is invoked during the LEADERS phase
filterColIds.add(-1);
// Determine the common parent and include these
expandCol = findMostCommonColumn(evolution, colName);
}
while (expandCol != null && expandCol.getId() != -1) {
// classify the column and the parent branch as LEAD
filterColIds.add(expandCol.getId());
rowIndexCols[expandCol.getId()] = true;
expandCol = expandCol.getParent();
}
}
this.startReadPhase = TypeReader.ReadPhase.LEADERS;
LOG.debug("Using startReadPhase: {} with filter columns: {}", startReadPhase, filterColIds);
} else {
this.startReadPhase = TypeReader.ReadPhase.ALL;
}
this.rowIndexColsToRead = ArrayUtils.contains(rowIndexCols, true) ? rowIndexCols : null;
TreeReaderFactory.ReaderContext readerContext =
new TreeReaderFactory.ReaderContext()
.setSchemaEvolution(evolution)
.setFilterCallback(filterColIds, filterCallBack)
.skipCorrupt(skipCorrupt)
.fileFormat(fileReader.getFileVersion())
.useUTCTimestamp(fileReader.useUTCTimestamp)
.setProlepticGregorian(fileReader.writerUsedProlepticGregorian(),
fileReader.options.getConvertToProlepticGregorian())
.setEncryption(encryption);
reader = TreeReaderFactory.createRootReader(evolution.getReaderSchema(), readerContext);
skipBloomFilters = hasBadBloomFilters(fileReader.getFileTail().getFooter());
int columns = evolution.getFileSchema().getMaximumId() + 1;
indexes = new OrcIndex(new OrcProto.RowIndex[columns],
new OrcProto.Stream.Kind[columns],
new OrcProto.BloomFilterIndex[columns]);
planner = new StripePlanner(evolution.getFileSchema(), encryption,
dataReader, writerVersion, ignoreNonUtf8BloomFilter,
maxDiskRangeChunkLimit, filterColIds);
try {
advanceToNextRow(reader, 0L, true);
} catch (Exception e) {
// Try to close since this happens in constructor.
close();
long stripeId = stripes.size() == 0 ? 0 : stripes.get(0).getStripeId();
throw new IOException(String.format("Problem opening stripe %d footer in %s.",
stripeId, path), e);
}
}
/**
* Check if the file has inconsistent bloom filters. We will skip using them
* in the following reads.
* @return true if it has.
*/
private boolean hasBadBloomFilters(OrcProto.Footer footer) {
// Only C++ writer in old releases could have bad bloom filters.
if (footer.getWriter() != 1) return false;
// 'softwareVersion' is added in 1.5.13, 1.6.11, and 1.7.0.
// 1.6.x releases before 1.6.11 won't have it. On the other side, the C++ writer
// supports writing bloom filters since 1.6.0. So files written by the C++ writer
// and with 'softwareVersion' unset would have bad bloom filters.
if (!footer.hasSoftwareVersion()) return true;
String fullVersion = footer.getSoftwareVersion();
String version = fullVersion;
// Deal with snapshot versions, e.g. 1.6.12-SNAPSHOT.
if (fullVersion.contains("-")) {
version = fullVersion.substring(0, fullVersion.indexOf('-'));
}
for (String v : BAD_CPP_BLOOM_FILTER_VERSIONS) {
if (v.equals(version)) {
return true;
}
}
return false;
}
public static final class PositionProviderImpl implements PositionProvider {
private final OrcProto.RowIndexEntry entry;
private int index;
public PositionProviderImpl(OrcProto.RowIndexEntry entry) {
this(entry, 0);
}
public PositionProviderImpl(OrcProto.RowIndexEntry entry, int startPos) {
this.entry = entry;
this.index = startPos;
}
@Override
public long getNext() {
return entry.getPositions(index++);
}
}
public static final class ZeroPositionProvider implements PositionProvider {
@Override
public long getNext() {
return 0;
}
}
public OrcProto.StripeFooter readStripeFooter(StripeInformation stripe
) throws IOException {
return dataReader.readStripeFooter(stripe);
}
enum Location {
BEFORE, MIN, MIDDLE, MAX, AFTER
}
static class ValueRange<T extends Comparable> {
final Comparable lower;
final Comparable upper;
final boolean onlyLowerBound;
final boolean onlyUpperBound;
final boolean hasNulls;
final boolean hasValue;
final boolean comparable;
ValueRange(PredicateLeaf predicate,
T lower, T upper,
boolean hasNulls,
boolean onlyLowerBound,
boolean onlyUpperBound,
boolean hasValue,
boolean comparable) {
PredicateLeaf.Type type = predicate.getType();
this.lower = getBaseObjectForComparison(type, lower);
this.upper = getBaseObjectForComparison(type, upper);
this.hasNulls = hasNulls;
this.onlyLowerBound = onlyLowerBound;
this.onlyUpperBound = onlyUpperBound;
this.hasValue = hasValue;
this.comparable = comparable;
}
ValueRange(PredicateLeaf predicate,
T lower, T upper,
boolean hasNulls,
boolean onlyLowerBound,
boolean onlyUpperBound) {
this(predicate, lower, upper, hasNulls, onlyLowerBound, onlyUpperBound,
lower != null, lower != null);
}
ValueRange(PredicateLeaf predicate, T lower, T upper,
boolean hasNulls) {
this(predicate, lower, upper, hasNulls, false, false);
}
/**
* A value range where the data is either missing or all null.
* @param predicate the predicate to test
* @param hasNulls whether there are nulls
*/
ValueRange(PredicateLeaf predicate, boolean hasNulls) {
this(predicate, null, null, hasNulls, false, false);
}
boolean hasValues() {
return hasValue;
}
/**
     * Whether a min or max value is available for comparison.
     * @return true if the range can be compared against
*/
boolean isComparable() {
return hasValue && comparable;
}
/**
     * The value range is invalid if the column statistics do not exist.
     * This method is similar to isStatsExists.
     * @see ColumnStatisticsImpl#isStatsExists()
     * @return whether the value range is valid
*/
boolean isValid() {
return hasValue || hasNulls;
}
/**
* Given a point and min and max, determine if the point is before, at the
* min, in the middle, at the max, or after the range.
* @param point the point to test
* @return the location of the point
*/
Location compare(Comparable point) {
int minCompare = point.compareTo(lower);
if (minCompare < 0) {
return Location.BEFORE;
} else if (minCompare == 0) {
return onlyLowerBound ? Location.BEFORE : Location.MIN;
}
int maxCompare = point.compareTo(upper);
if (maxCompare > 0) {
return Location.AFTER;
} else if (maxCompare == 0) {
return onlyUpperBound ? Location.AFTER : Location.MAX;
}
return Location.MIDDLE;
}
/**
* Is this range a single point?
* @return true if min == max
*/
boolean isSingleton() {
return lower != null && !onlyLowerBound && !onlyUpperBound &&
lower.equals(upper);
}
/**
* Add the null option to the truth value, if the range includes nulls.
* @param value the original truth value
* @return the truth value extended with null if appropriate
*/
TruthValue addNull(TruthValue value) {
if (hasNulls) {
switch (value) {
case YES:
return TruthValue.YES_NULL;
case NO:
return TruthValue.NO_NULL;
case YES_NO:
return TruthValue.YES_NO_NULL;
default:
return value;
}
} else {
return value;
}
}
}
  /**
   * Get the value range (minimum and maximum) out of the column statistics.
   * Includes an option to specify if timestamp column stats values
   * should be in UTC.
   * @param index the column statistics
   * @param predicate the predicate that the range will be compared against
   * @param useUTCTimestamp use UTC for timestamps
   * @return the value range for the column
   */
static ValueRange getValueRange(ColumnStatistics index,
PredicateLeaf predicate,
boolean useUTCTimestamp) {
if (index.getNumberOfValues() == 0) {
return new ValueRange<>(predicate, index.hasNull());
} else if (index instanceof IntegerColumnStatistics) {
IntegerColumnStatistics stats = (IntegerColumnStatistics) index;
Long min = stats.getMinimum();
Long max = stats.getMaximum();
return new ValueRange<>(predicate, min, max, stats.hasNull());
} else if (index instanceof CollectionColumnStatistics) {
CollectionColumnStatistics stats = (CollectionColumnStatistics) index;
Long min = stats.getMinimumChildren();
Long max = stats.getMaximumChildren();
return new ValueRange<>(predicate, min, max, stats.hasNull());
    } else if (index instanceof DoubleColumnStatistics) {
DoubleColumnStatistics stats = (DoubleColumnStatistics) index;
Double min = stats.getMinimum();
Double max = stats.getMaximum();
return new ValueRange<>(predicate, min, max, stats.hasNull());
} else if (index instanceof StringColumnStatistics) {
StringColumnStatistics stats = (StringColumnStatistics) index;
return new ValueRange<>(predicate, stats.getLowerBound(),
stats.getUpperBound(), stats.hasNull(), stats.getMinimum() == null,
stats.getMaximum() == null);
} else if (index instanceof DateColumnStatistics) {
DateColumnStatistics stats = (DateColumnStatistics) index;
ChronoLocalDate min = stats.getMinimumLocalDate();
ChronoLocalDate max = stats.getMaximumLocalDate();
return new ValueRange<>(predicate, min, max, stats.hasNull());
} else if (index instanceof DecimalColumnStatistics) {
DecimalColumnStatistics stats = (DecimalColumnStatistics) index;
HiveDecimal min = stats.getMinimum();
HiveDecimal max = stats.getMaximum();
return new ValueRange<>(predicate, min, max, stats.hasNull());
} else if (index instanceof TimestampColumnStatistics) {
TimestampColumnStatistics stats = (TimestampColumnStatistics) index;
Timestamp min = useUTCTimestamp ? stats.getMinimumUTC() : stats.getMinimum();
Timestamp max = useUTCTimestamp ? stats.getMaximumUTC() : stats.getMaximum();
return new ValueRange<>(predicate, min, max, stats.hasNull());
} else if (index instanceof BooleanColumnStatistics) {
BooleanColumnStatistics stats = (BooleanColumnStatistics) index;
Boolean min = stats.getFalseCount() == 0;
Boolean max = stats.getTrueCount() != 0;
return new ValueRange<>(predicate, min, max, stats.hasNull());
} else {
return new ValueRange(predicate, null, null, index.hasNull(), false, false, true, false);
}
}
/**
* Evaluate a predicate with respect to the statistics from the column
* that is referenced in the predicate.
* @param statsProto the statistics for the column mentioned in the predicate
   * @param predicate the leaf predicate we need to evaluate
* @param bloomFilter the bloom filter
* @param writerVersion the version of software that wrote the file
* @param type what is the kind of this column
* @return the set of truth values that may be returned for the given
* predicate.
*/
static TruthValue evaluatePredicateProto(OrcProto.ColumnStatistics statsProto,
PredicateLeaf predicate,
OrcProto.Stream.Kind kind,
OrcProto.ColumnEncoding encoding,
OrcProto.BloomFilter bloomFilter,
OrcFile.WriterVersion writerVersion,
TypeDescription type) {
return evaluatePredicateProto(statsProto, predicate, kind, encoding, bloomFilter,
writerVersion, type, true, false);
}
/**
* Evaluate a predicate with respect to the statistics from the column
* that is referenced in the predicate.
* Includes option to specify if timestamp column stats values
* should be in UTC and if the file writer used proleptic Gregorian calendar.
* @param statsProto the statistics for the column mentioned in the predicate
   * @param predicate the leaf predicate we need to evaluate
* @param bloomFilter the bloom filter
* @param writerVersion the version of software that wrote the file
* @param type what is the kind of this column
   * @param writerUsedProlepticGregorian whether the file was written using the proleptic Gregorian calendar
   * @param useUTCTimestamp use UTC for timestamp min/max values
* @return the set of truth values that may be returned for the given
* predicate.
*/
static TruthValue evaluatePredicateProto(OrcProto.ColumnStatistics statsProto,
PredicateLeaf predicate,
OrcProto.Stream.Kind kind,
OrcProto.ColumnEncoding encoding,
OrcProto.BloomFilter bloomFilter,
OrcFile.WriterVersion writerVersion,
TypeDescription type,
boolean writerUsedProlepticGregorian,
boolean useUTCTimestamp) {
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(
null, statsProto, writerUsedProlepticGregorian, true);
ValueRange range = getValueRange(cs, predicate, useUTCTimestamp);
    // Files written before ORC-135 store timestamps relative to the writer's local time zone,
    // which breaks predicate pushdown, so disable PPD on timestamp columns for all such old files.
TypeDescription.Category category = type.getCategory();
if (category == TypeDescription.Category.TIMESTAMP) {
if (!writerVersion.includes(OrcFile.WriterVersion.ORC_135)) {
LOG.debug("Not using predication pushdown on {} because it doesn't " +
"include ORC-135. Writer version: {}",
predicate.getColumnName(), writerVersion);
return range.addNull(TruthValue.YES_NO);
}
if (predicate.getType() != PredicateLeaf.Type.TIMESTAMP &&
predicate.getType() != PredicateLeaf.Type.DATE &&
predicate.getType() != PredicateLeaf.Type.STRING) {
return range.addNull(TruthValue.YES_NO);
}
} else if (writerVersion == OrcFile.WriterVersion.ORC_135 &&
category == TypeDescription.Category.DECIMAL &&
type.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
// ORC 1.5.0 to 1.5.5, which use WriterVersion.ORC_135, have broken
// min and max values for decimal64. See ORC-517.
LOG.debug("Not using predicate push down on {}, because the file doesn't"+
" include ORC-517. Writer version: {}",
predicate.getColumnName(), writerVersion);
return TruthValue.YES_NO_NULL;
} else if (category == TypeDescription.Category.DOUBLE ||
category == TypeDescription.Category.FLOAT) {
      DoubleColumnStatistics dstats = (DoubleColumnStatistics) cs;
      if (Double.isNaN(dstats.getSum())) {
        LOG.debug("Not using predicate pushdown on {} because stats contain NaN values",
                  predicate.getColumnName());
        return dstats.hasNull() ? TruthValue.YES_NO_NULL : TruthValue.YES_NO;
}
}
return evaluatePredicateRange(predicate, range,
BloomFilterIO.deserialize(kind, encoding, writerVersion, type.getCategory(),
bloomFilter), useUTCTimestamp);
}
/**
* Evaluate a predicate with respect to the statistics from the column
* that is referenced in the predicate.
* @param stats the statistics for the column mentioned in the predicate
   * @param predicate the leaf predicate we need to evaluate
   * @param bloomFilter the bloom filter for the column, or null if there isn't one
* @return the set of truth values that may be returned for the given
* predicate.
*/
public static TruthValue evaluatePredicate(ColumnStatistics stats,
PredicateLeaf predicate,
BloomFilter bloomFilter) {
return evaluatePredicate(stats, predicate, bloomFilter, false);
}
/**
* Evaluate a predicate with respect to the statistics from the column
* that is referenced in the predicate.
* Includes option to specify if timestamp column stats values
* should be in UTC.
* @param stats the statistics for the column mentioned in the predicate
   * @param predicate the leaf predicate we need to evaluate
   * @param bloomFilter the bloom filter for the column, or null if there isn't one
   * @param useUTCTimestamp use UTC for timestamp min/max values
* @return the set of truth values that may be returned for the given
* predicate.
*/
public static TruthValue evaluatePredicate(ColumnStatistics stats,
PredicateLeaf predicate,
BloomFilter bloomFilter,
boolean useUTCTimestamp) {
ValueRange range = getValueRange(stats, predicate, useUTCTimestamp);
return evaluatePredicateRange(predicate, range, bloomFilter, useUTCTimestamp);
}
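  // A minimal usage sketch (the stats and predicate leaf below are placeholders,
  // not something this class provides):
  //   ColumnStatistics stats = reader.getStatistics()[columnId];
  //   TruthValue tv = evaluatePredicate(stats, leaf, null);
  //   if (!tv.isNeeded()) { /* no row can satisfy the predicate, skip the data */ }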
static TruthValue evaluatePredicateRange(PredicateLeaf predicate,
ValueRange range,
BloomFilter bloomFilter,
boolean useUTCTimestamp) {
if (!range.isValid()) {
return TruthValue.YES_NO_NULL;
}
// if we didn't have any values, everything must have been null
if (!range.hasValues()) {
if (predicate.getOperator() == PredicateLeaf.Operator.IS_NULL) {
return TruthValue.YES;
} else {
return TruthValue.NULL;
}
} else if (!range.isComparable()) {
return range.hasNulls ? TruthValue.YES_NO_NULL : TruthValue.YES_NO;
}
TruthValue result;
Comparable baseObj = (Comparable) predicate.getLiteral();
// Predicate object and stats objects are converted to the type of the predicate object.
Comparable predObj = getBaseObjectForComparison(predicate.getType(), baseObj);
result = evaluatePredicateMinMax(predicate, predObj, range);
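    // If min/max says the value may be present, consult the bloom filter,
    // which may still rule it out (e.g. turn YES_NO into NO).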
if (shouldEvaluateBloomFilter(predicate, result, bloomFilter)) {
return evaluatePredicateBloomFilter(
predicate, predObj, bloomFilter, range.hasNulls, useUTCTimestamp);
} else {
return result;
}
}
private static boolean shouldEvaluateBloomFilter(PredicateLeaf predicate,
TruthValue result, BloomFilter bloomFilter) {
// evaluate bloom filter only when
// 1) Bloom filter is available
// 2) Min/Max evaluation yield YES or MAYBE
// 3) Predicate is EQUALS or IN list
return bloomFilter != null &&
result != TruthValue.NO_NULL && result != TruthValue.NO &&
(predicate.getOperator().equals(PredicateLeaf.Operator.EQUALS) ||
predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS) ||
predicate.getOperator().equals(PredicateLeaf.Operator.IN));
}
private static TruthValue evaluatePredicateMinMax(PredicateLeaf predicate,
Comparable predObj,
ValueRange range) {
Location loc;
switch (predicate.getOperator()) {
case NULL_SAFE_EQUALS:
loc = range.compare(predObj);
if (loc == Location.BEFORE || loc == Location.AFTER) {
return TruthValue.NO;
} else {
return TruthValue.YES_NO;
}
case EQUALS:
loc = range.compare(predObj);
if (range.isSingleton() && loc == Location.MIN) {
return range.addNull(TruthValue.YES);
} else if (loc == Location.BEFORE || loc == Location.AFTER) {
return range.addNull(TruthValue.NO);
} else {
return range.addNull(TruthValue.YES_NO);
}
case LESS_THAN:
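        // e.g. predicate "c < 10" with a column range of [10, 20]: 10 compares
        // at MIN, so no row can match.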
loc = range.compare(predObj);
if (loc == Location.AFTER) {
return range.addNull(TruthValue.YES);
} else if (loc == Location.BEFORE || loc == Location.MIN) {
return range.addNull(TruthValue.NO);
} else {
return range.addNull(TruthValue.YES_NO);
}
case LESS_THAN_EQUALS:
loc = range.compare(predObj);
if (loc == Location.AFTER || loc == Location.MAX ||
(loc == Location.MIN && range.isSingleton())) {
return range.addNull(TruthValue.YES);
} else if (loc == Location.BEFORE) {
return range.addNull(TruthValue.NO);
} else {
return range.addNull(TruthValue.YES_NO);
}
case IN:
if (range.isSingleton()) {
// for a single value, look through to see if that value is in the
// set
for (Object arg : predicate.getLiteralList()) {
predObj = getBaseObjectForComparison(predicate.getType(), (Comparable) arg);
if (range.compare(predObj) == Location.MIN) {
return range.addNull(TruthValue.YES);
}
}
return range.addNull(TruthValue.NO);
} else {
// are all of the values outside of the range?
for (Object arg : predicate.getLiteralList()) {
predObj = getBaseObjectForComparison(predicate.getType(), (Comparable) arg);
loc = range.compare(predObj);
if (loc == Location.MIN || loc == Location.MIDDLE ||
loc == Location.MAX) {
return range.addNull(TruthValue.YES_NO);
}
}
return range.addNull(TruthValue.NO);
}
case BETWEEN:
List<Object> args = predicate.getLiteralList();
if (args == null || args.isEmpty()) {
return range.addNull(TruthValue.YES_NO);
}
Comparable predObj1 = getBaseObjectForComparison(predicate.getType(),
(Comparable) args.get(0));
loc = range.compare(predObj1);
if (loc == Location.BEFORE || loc == Location.MIN) {
Comparable predObj2 = getBaseObjectForComparison(predicate.getType(),
(Comparable) args.get(1));
Location loc2 = range.compare(predObj2);
if (loc2 == Location.AFTER || loc2 == Location.MAX) {
return range.addNull(TruthValue.YES);
} else if (loc2 == Location.BEFORE) {
return range.addNull(TruthValue.NO);
} else {
return range.addNull(TruthValue.YES_NO);
}
} else if (loc == Location.AFTER) {
return range.addNull(TruthValue.NO);
} else {
return range.addNull(TruthValue.YES_NO);
}
case IS_NULL:
// min = null condition above handles the all-nulls YES case
return range.hasNulls ? TruthValue.YES_NO : TruthValue.NO;
default:
return range.addNull(TruthValue.YES_NO);
}
}
private static TruthValue evaluatePredicateBloomFilter(PredicateLeaf predicate,
final Object predObj, BloomFilter bloomFilter, boolean hasNull, boolean useUTCTimestamp) {
switch (predicate.getOperator()) {
case NULL_SAFE_EQUALS:
// null safe equals does not return *_NULL variant. So set hasNull to false
return checkInBloomFilter(bloomFilter, predObj, false, useUTCTimestamp);
case EQUALS:
return checkInBloomFilter(bloomFilter, predObj, hasNull, useUTCTimestamp);
case IN:
for (Object arg : predicate.getLiteralList()) {
        // if at least one value in the IN list exists in the bloom filter, qualify the row group/stripe
Object predObjItem = getBaseObjectForComparison(predicate.getType(), (Comparable) arg);
TruthValue result =
checkInBloomFilter(bloomFilter, predObjItem, hasNull, useUTCTimestamp);
if (result == TruthValue.YES_NO_NULL || result == TruthValue.YES_NO) {
return result;
}
}
return hasNull ? TruthValue.NO_NULL : TruthValue.NO;
default:
return hasNull ? TruthValue.YES_NO_NULL : TruthValue.YES_NO;
}
}
private static TruthValue checkInBloomFilter(BloomFilter bf,
Object predObj,
boolean hasNull,
boolean useUTCTimestamp) {
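    // Start pessimistic: assume the value is absent and upgrade to a possible
    // match only when the bloom filter reports a hit.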
TruthValue result = hasNull ? TruthValue.NO_NULL : TruthValue.NO;
if (predObj instanceof Long) {
if (bf.testLong((Long) predObj)) {
result = TruthValue.YES_NO_NULL;
}
} else if (predObj instanceof Double) {
if (bf.testDouble((Double) predObj)) {
result = TruthValue.YES_NO_NULL;
}
} else if (predObj instanceof String || predObj instanceof Text ||
predObj instanceof HiveDecimalWritable ||
predObj instanceof BigDecimal) {
if (bf.testString(predObj.toString())) {
result = TruthValue.YES_NO_NULL;
}
} else if (predObj instanceof Timestamp) {
if (useUTCTimestamp) {
if (bf.testLong(((Timestamp) predObj).getTime())) {
result = TruthValue.YES_NO_NULL;
}
} else {
if (bf.testLong(SerializationUtils.convertToUtc(
TimeZone.getDefault(), ((Timestamp) predObj).getTime()))) {
result = TruthValue.YES_NO_NULL;
}
}
} else if (predObj instanceof ChronoLocalDate) {
if (bf.testLong(((ChronoLocalDate) predObj).toEpochDay())) {
result = TruthValue.YES_NO_NULL;
}
} else {
// if the predicate object is null and if hasNull says there are no nulls then return NO
if (predObj == null && !hasNull) {
result = TruthValue.NO;
} else {
result = TruthValue.YES_NO_NULL;
}
}
if (result == TruthValue.YES_NO_NULL && !hasNull) {
result = TruthValue.YES_NO;
}
LOG.debug("Bloom filter evaluation: {}", result);
return result;
}
/**
* An exception for when we can't cast things appropriately
*/
static class SargCastException extends IllegalArgumentException {
SargCastException(String string) {
super(string);
}
}
private static Comparable getBaseObjectForComparison(PredicateLeaf.Type type,
Comparable obj) {
if (obj == null) {
return null;
}
switch (type) {
case BOOLEAN:
if (obj instanceof Boolean) {
return obj;
} else {
// will only be true if the string conversion yields "true", all other values are
// considered false
return Boolean.valueOf(obj.toString());
}
case DATE:
if (obj instanceof ChronoLocalDate) {
return obj;
} else if (obj instanceof java.sql.Date) {
return ((java.sql.Date) obj).toLocalDate();
} else if (obj instanceof Date) {
return LocalDateTime.ofInstant(((Date) obj).toInstant(),
ZoneOffset.UTC).toLocalDate();
} else if (obj instanceof String) {
return LocalDate.parse((String) obj);
} else if (obj instanceof Timestamp) {
return ((Timestamp) obj).toLocalDateTime().toLocalDate();
}
// always string, but prevent the comparison to numbers (are they days/seconds/milliseconds?)
break;
case DECIMAL:
if (obj instanceof Boolean) {
return new HiveDecimalWritable((Boolean) obj ?
HiveDecimal.ONE : HiveDecimal.ZERO);
} else if (obj instanceof Integer) {
return new HiveDecimalWritable((Integer) obj);
} else if (obj instanceof Long) {
return new HiveDecimalWritable(((Long) obj));
} else if (obj instanceof Float || obj instanceof Double ||
obj instanceof String) {
return new HiveDecimalWritable(obj.toString());
} else if (obj instanceof BigDecimal) {
return new HiveDecimalWritable(HiveDecimal.create((BigDecimal) obj));
} else if (obj instanceof HiveDecimal) {
return new HiveDecimalWritable((HiveDecimal) obj);
} else if (obj instanceof HiveDecimalWritable) {
return obj;
} else if (obj instanceof Timestamp) {
return new HiveDecimalWritable(Double.toString(
TimestampUtils.getDouble((Timestamp) obj)));
}
break;
case FLOAT:
if (obj instanceof Number) {
// widening conversion
return ((Number) obj).doubleValue();
} else if (obj instanceof HiveDecimal) {
return ((HiveDecimal) obj).doubleValue();
} else if (obj instanceof String) {
return Double.valueOf(obj.toString());
} else if (obj instanceof Timestamp) {
return TimestampUtils.getDouble((Timestamp) obj);
}
break;
case LONG:
if (obj instanceof Number) {
// widening conversion
return ((Number) obj).longValue();
} else if (obj instanceof HiveDecimal) {
return ((HiveDecimal) obj).longValue();
} else if (obj instanceof String) {
return Long.valueOf(obj.toString());
}
break;
case STRING:
if (obj instanceof ChronoLocalDate) {
ChronoLocalDate date = (ChronoLocalDate) obj;
return date.format(DateTimeFormatter.ISO_LOCAL_DATE
.withChronology(date.getChronology()));
}
return (obj.toString());
case TIMESTAMP:
if (obj instanceof Timestamp) {
return obj;
} else if (obj instanceof Integer) {
return new Timestamp(((Number) obj).longValue());
} else if (obj instanceof Float) {
return TimestampUtils.doubleToTimestamp(((Float) obj).doubleValue());
} else if (obj instanceof Double) {
return TimestampUtils.doubleToTimestamp((Double) obj);
} else if (obj instanceof HiveDecimal) {
return TimestampUtils.decimalToTimestamp((HiveDecimal) obj);
} else if (obj instanceof HiveDecimalWritable) {
return TimestampUtils.decimalToTimestamp(((HiveDecimalWritable) obj).getHiveDecimal());
} else if (obj instanceof Date) {
return new Timestamp(((Date) obj).getTime());
} else if (obj instanceof ChronoLocalDate) {
return new Timestamp(((ChronoLocalDate) obj).atTime(LocalTime.MIDNIGHT)
.toInstant(ZoneOffset.UTC).getEpochSecond() * 1000L);
}
// float/double conversion to timestamp is interpreted as seconds whereas integer conversion
// to timestamp is interpreted as milliseconds by default. The integer to timestamp casting
// is also config driven. The filter operator changes its promotion based on config:
// "int.timestamp.conversion.in.seconds". Disable PPD for integer cases.
break;
default:
break;
}
throw new SargCastException(String.format(
"ORC SARGS could not convert from %s to %s", obj.getClass()
.getSimpleName(), type));
}
public static class SargApplier {
public static final boolean[] READ_ALL_RGS = null;
public static final boolean[] READ_NO_RGS = new boolean[0];
private final OrcFile.WriterVersion writerVersion;
private final SearchArgument sarg;
private final List<PredicateLeaf> sargLeaves;
private final int[] filterColumns;
private final long rowIndexStride;
    // schema evolution mapping between the reader and file schemas
private final SchemaEvolution evolution;
private final long[] exceptionCount;
private final boolean useUTCTimestamp;
private final boolean writerUsedProlepticGregorian;
private final boolean convertToProlepticGregorian;
/**
* @deprecated Use the constructor having full parameters. This exists for backward compatibility.
*/
public SargApplier(SearchArgument sarg,
long rowIndexStride,
SchemaEvolution evolution,
OrcFile.WriterVersion writerVersion,
boolean useUTCTimestamp) {
this(sarg, rowIndexStride, evolution, writerVersion, useUTCTimestamp, false, false);
}
public SargApplier(SearchArgument sarg,
long rowIndexStride,
SchemaEvolution evolution,
OrcFile.WriterVersion writerVersion,
boolean useUTCTimestamp,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
this.writerVersion = writerVersion;
this.sarg = sarg;
sargLeaves = sarg.getLeaves();
this.writerUsedProlepticGregorian = writerUsedProlepticGregorian;
this.convertToProlepticGregorian = convertToProlepticGregorian;
filterColumns = mapSargColumnsToOrcInternalColIdx(sargLeaves,
evolution);
this.rowIndexStride = rowIndexStride;
this.evolution = evolution;
exceptionCount = new long[sargLeaves.size()];
this.useUTCTimestamp = useUTCTimestamp;
}
public void setRowIndexCols(boolean[] rowIndexCols) {
// included will not be null, row options will fill the array with
// trues if null
for (int i : filterColumns) {
// filter columns may have -1 as index which could be partition
// column in SARG.
if (i > 0) {
rowIndexCols[i] = true;
}
}
}
/**
* Pick the row groups that we need to load from the current stripe.
*
* @return an array with a boolean for each row group or null if all of the
* row groups must be read.
* @throws IOException
*/
public boolean[] pickRowGroups(StripeInformation stripe,
OrcProto.RowIndex[] indexes,
OrcProto.Stream.Kind[] bloomFilterKinds,
List<OrcProto.ColumnEncoding> encodings,
OrcProto.BloomFilterIndex[] bloomFilterIndices,
boolean returnNone) throws IOException {
long rowsInStripe = stripe.getNumberOfRows();
int groupsInStripe = (int) ((rowsInStripe + rowIndexStride - 1) / rowIndexStride);
boolean[] result = new boolean[groupsInStripe]; // TODO: avoid alloc?
TruthValue[] leafValues = new TruthValue[sargLeaves.size()];
boolean hasSelected = false;
boolean hasSkipped = false;
TruthValue[] exceptionAnswer = new TruthValue[leafValues.length];
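      // Cache the fallback answer for any predicate that threw an exception, so
      // it is not re-evaluated (and re-logged) for every remaining row group.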
for (int rowGroup = 0; rowGroup < result.length; ++rowGroup) {
for (int pred = 0; pred < leafValues.length; ++pred) {
int columnIx = filterColumns[pred];
if (columnIx == -1) {
// the column is a virtual column
leafValues[pred] = TruthValue.YES_NO_NULL;
} else if (exceptionAnswer[pred] != null) {
leafValues[pred] = exceptionAnswer[pred];
} else {
if (indexes[columnIx] == null) {
LOG.warn("Index is not populated for " + columnIx);
return READ_ALL_RGS;
}
OrcProto.RowIndexEntry entry = indexes[columnIx].getEntry(rowGroup);
if (entry == null) {
throw new AssertionError("RG is not populated for " + columnIx + " rg " + rowGroup);
}
OrcProto.ColumnStatistics stats = EMPTY_COLUMN_STATISTICS;
if (entry.hasStatistics()) {
stats = entry.getStatistics();
}
OrcProto.BloomFilter bf = null;
OrcProto.Stream.Kind bfk = null;
if (bloomFilterIndices != null && bloomFilterIndices[columnIx] != null) {
bfk = bloomFilterKinds[columnIx];
bf = bloomFilterIndices[columnIx].getBloomFilter(rowGroup);
}
if (evolution != null && evolution.isPPDSafeConversion(columnIx)) {
PredicateLeaf predicate = sargLeaves.get(pred);
try {
leafValues[pred] = evaluatePredicateProto(stats,
predicate, bfk, encodings.get(columnIx), bf,
writerVersion, evolution.getFileSchema().
findSubtype(columnIx),
writerUsedProlepticGregorian, useUTCTimestamp);
} catch (Exception e) {
exceptionCount[pred] += 1;
if (e instanceof SargCastException) {
LOG.info("Skipping ORC PPD - " + e.getMessage() + " on "
+ predicate);
} else {
final String reason = e.getClass().getSimpleName() +
" when evaluating predicate." +
" Skipping ORC PPD." +
" Stats: " + stats +
" Predicate: " + predicate;
LOG.warn(reason, e);
}
boolean hasNoNull = stats.hasHasNull() && !stats.getHasNull();
if (predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS) ||
hasNoNull) {
exceptionAnswer[pred] = TruthValue.YES_NO;
} else {
exceptionAnswer[pred] = TruthValue.YES_NO_NULL;
}
leafValues[pred] = exceptionAnswer[pred];
}
} else {
leafValues[pred] = TruthValue.YES_NO_NULL;
}
if (LOG.isTraceEnabled()) {
LOG.trace("Stats = " + stats);
LOG.trace("Setting " + sargLeaves.get(pred) + " to " + leafValues[pred]);
}
}
}
result[rowGroup] = sarg.evaluate(leafValues).isNeeded();
hasSelected = hasSelected || result[rowGroup];
hasSkipped = hasSkipped || (!result[rowGroup]);
if (LOG.isDebugEnabled()) {
LOG.debug("Row group " + (rowIndexStride * rowGroup) + " to " +
(rowIndexStride * (rowGroup + 1) - 1) + " is " +
(result[rowGroup] ? "" : "not ") + "included.");
}
}
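      // READ_ALL_RGS (null) means every row group is needed; READ_NO_RGS is only
      // returned when nothing was selected and the caller asked for returnNone.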
return hasSkipped ? ((hasSelected || !returnNone) ? result : READ_NO_RGS) : READ_ALL_RGS;
}
/**
* Get the count of exceptions for testing.
     * @return the number of exceptions thrown while evaluating each predicate leaf
*/
long[] getExceptionCount() {
return exceptionCount;
}
}
/**
* Pick the row groups that we need to load from the current stripe.
*
* @return an array with a boolean for each row group or null if all of the
* row groups must be read.
* @throws IOException
*/
protected boolean[] pickRowGroups() throws IOException {
    // Read the row indices if required
if (rowIndexColsToRead != null) {
readCurrentStripeRowIndex();
}
// In the absence of SArg all rows groups should be included
if (sargApp == null) {
return null;
}
return sargApp.pickRowGroups(stripes.get(currentStripe),
indexes.getRowGroupIndex(),
skipBloomFilters ? null : indexes.getBloomFilterKinds(),
stripeFooter.getColumnsList(),
skipBloomFilters ? null : indexes.getBloomFilterIndex(),
false);
}
private void clearStreams() {
planner.clearStreams();
}
/**
* Read the current stripe into memory.
*
* @throws IOException
*/
private void readStripe() throws IOException {
StripeInformation stripe = beginReadStripe();
planner.parseStripe(stripe, fileIncluded);
includedRowGroups = pickRowGroups();
// move forward to the first unskipped row
if (includedRowGroups != null) {
while (rowInStripe < rowCountInStripe &&
!includedRowGroups[(int) (rowInStripe / rowIndexStride)]) {
rowInStripe = Math.min(rowCountInStripe, rowInStripe + rowIndexStride);
}
}
// if we haven't skipped the whole stripe, read the data
if (rowInStripe < rowCountInStripe) {
planner.readData(indexes, includedRowGroups, false, startReadPhase);
reader.startStripe(planner, startReadPhase);
needsFollowColumnsRead = true;
// if we skipped the first row group, move the pointers forward
if (rowInStripe != 0) {
seekToRowEntry(reader, (int) (rowInStripe / rowIndexStride), startReadPhase);
}
}
}
private StripeInformation beginReadStripe() throws IOException {
StripeInformation stripe = stripes.get(currentStripe);
stripeFooter = readStripeFooter(stripe);
clearStreams();
// setup the position in the stripe
rowCountInStripe = stripe.getNumberOfRows();
rowInStripe = 0;
followRowInStripe = 0;
rowBaseInStripe = 0;
for (int i = 0; i < currentStripe; ++i) {
rowBaseInStripe += stripes.get(i).getNumberOfRows();
}
// reset all of the indexes
OrcProto.RowIndex[] rowIndex = indexes.getRowGroupIndex();
for (int i = 0; i < rowIndex.length; ++i) {
rowIndex[i] = null;
}
return stripe;
}
/**
* Read the next stripe until we find a row that we don't skip.
*
* @throws IOException
*/
private void advanceStripe() throws IOException {
rowInStripe = rowCountInStripe;
while (rowInStripe >= rowCountInStripe &&
currentStripe < stripes.size() - 1) {
currentStripe += 1;
readStripe();
}
}
/**
* Determine the RowGroup based on the supplied row id.
* @param rowIdx Row for which the row group is being determined
* @return Id of the RowGroup that the row belongs to
*/
private int computeRGIdx(long rowIdx) {
return rowIndexStride == 0 ? 0 : (int) (rowIdx / rowIndexStride);
}
/**
* Skip over rows that we aren't selecting, so that the next row is
* one that we will read.
*
* @param nextRow the row we want to go to
* @throws IOException
*/
private boolean advanceToNextRow(BatchReader reader, long nextRow, boolean canAdvanceStripe)
throws IOException {
long nextRowInStripe = nextRow - rowBaseInStripe;
// check for row skipping
if (rowIndexStride != 0 &&
includedRowGroups != null &&
nextRowInStripe < rowCountInStripe) {
int rowGroup = computeRGIdx(nextRowInStripe);
if (!includedRowGroups[rowGroup]) {
while (rowGroup < includedRowGroups.length && !includedRowGroups[rowGroup]) {
rowGroup += 1;
}
if (rowGroup >= includedRowGroups.length) {
if (canAdvanceStripe) {
advanceStripe();
}
return canAdvanceStripe;
}
nextRowInStripe = Math.min(rowCountInStripe, rowGroup * rowIndexStride);
}
}
if (nextRowInStripe >= rowCountInStripe) {
if (canAdvanceStripe) {
advanceStripe();
}
return canAdvanceStripe;
}
if (nextRowInStripe != rowInStripe) {
if (rowIndexStride != 0) {
int rowGroup = (int) (nextRowInStripe / rowIndexStride);
seekToRowEntry(reader, rowGroup, startReadPhase);
reader.skipRows(nextRowInStripe - rowGroup * rowIndexStride, startReadPhase);
} else {
reader.skipRows(nextRowInStripe - rowInStripe, startReadPhase);
}
rowInStripe = nextRowInStripe;
}
return true;
}
@Override
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
try {
int batchSize;
// do...while is required to handle the case where the filter eliminates all rows in the
      // batch; we never return an empty batch unless the file is exhausted
do {
if (rowInStripe >= rowCountInStripe) {
currentStripe += 1;
if (currentStripe >= stripes.size()) {
batch.size = 0;
return false;
}
// Read stripe in Memory
readStripe();
followRowInStripe = rowInStripe;
}
batchSize = computeBatchSize(batch.getMaxSize());
reader.setVectorColumnCount(batch.getDataColumnCount());
reader.nextBatch(batch, batchSize, startReadPhase);
if (startReadPhase == TypeReader.ReadPhase.LEADERS && batch.size > 0) {
// At least 1 row has been selected and as a result we read the follow columns into the
// row batch
reader.nextBatch(batch,
batchSize,
prepareFollowReaders(rowInStripe, followRowInStripe));
followRowInStripe = rowInStripe + batchSize;
}
rowInStripe += batchSize;
advanceToNextRow(reader, rowInStripe + rowBaseInStripe, true);
// batch.size can be modified by filter so only batchSize can tell if we actually read rows
} while (batchSize != 0 && batch.size == 0);
if (noSelectedVector) {
        // In case the selected vector is not supported, we leave the batch size as the read size.
        // The non-filter columns might still be read selectively, but the filter applied after the
        // reader should eliminate rows that don't match the predicate conditions.
batch.size = batchSize;
batch.selectedInUse = false;
}
return batchSize != 0;
} catch (IOException e) {
// Rethrow exception with file name in log message
throw new IOException("Error reading file: " + path, e);
}
}
/**
* This method prepares the non-filter column readers for next batch. This involves the following
* 1. Determine position
* 2. Perform IO if required
* 3. Position the non-filter readers
*
* This method is repositioning the non-filter columns and as such this method shall never have to
* deal with navigating the stripe forward or skipping row groups, all of this should have already
* taken place based on the filter columns.
* @param toFollowRow The rowIdx identifies the required row position within the stripe for
* follow read
* @param fromFollowRow Indicates the current position of the follow read, exclusive
* @return the read phase for reading non-filter columns, this shall be FOLLOWERS_AND_PARENTS in
* case of a seek otherwise will be FOLLOWERS
*/
private TypeReader.ReadPhase prepareFollowReaders(long toFollowRow, long fromFollowRow)
throws IOException {
// 1. Determine the required row group and skip rows needed from the RG start
int needRG = computeRGIdx(toFollowRow);
// The current row is not yet read so we -1 to compute the previously read row group
int readRG = computeRGIdx(fromFollowRow - 1);
long skipRows;
if (needRG == readRG && toFollowRow >= fromFollowRow) {
// In case we are skipping forward within the same row group, we compute skip rows from the
// current position
skipRows = toFollowRow - fromFollowRow;
} else {
// In all other cases including seeking backwards, we compute the skip rows from the start of
// the required row group
skipRows = toFollowRow - (needRG * rowIndexStride);
}
// 2. Plan the row group idx for the non-filter columns if this has not already taken place
if (needsFollowColumnsRead) {
needsFollowColumnsRead = false;
planner.readFollowData(indexes, includedRowGroups, needRG, false);
reader.startStripe(planner, TypeReader.ReadPhase.FOLLOWERS);
}
// 3. Position the non-filter readers to the required RG and skipRows
TypeReader.ReadPhase result = TypeReader.ReadPhase.FOLLOWERS;
if (needRG != readRG || toFollowRow < fromFollowRow) {
// When having to change a row group or in case of back navigation, seek both the filter
// parents and non-filter. This will re-position the parents present vector. This is needed
// to determine the number of non-null values to skip on the non-filter columns.
seekToRowEntry(reader, needRG, TypeReader.ReadPhase.FOLLOWERS_AND_PARENTS);
// skip rows on both the filter parents and non-filter as both have been positioned in the
// previous step
reader.skipRows(skipRows, TypeReader.ReadPhase.FOLLOWERS_AND_PARENTS);
result = TypeReader.ReadPhase.FOLLOWERS_AND_PARENTS;
} else if (skipRows > 0) {
// in case we are only skipping within the row group, position the filter parents back to the
// position of the follow. This is required to determine the non-null values to skip on the
// non-filter columns.
seekToRowEntry(reader, readRG, TypeReader.ReadPhase.LEADER_PARENTS);
reader.skipRows(fromFollowRow - (readRG * rowIndexStride),
TypeReader.ReadPhase.LEADER_PARENTS);
// Move both the filter parents and non-filter forward, this will compute the correct
// non-null skips on follow children
reader.skipRows(skipRows, TypeReader.ReadPhase.FOLLOWERS_AND_PARENTS);
result = TypeReader.ReadPhase.FOLLOWERS_AND_PARENTS;
}
// Identifies the read level that should be performed for the read
    // FOLLOWERS_AND_PARENTS indicates a reposition of both the non-filter columns and the filter parents
    // FOLLOWERS indicates reading only the non-filter columns without the parents, which is used during
// contiguous read. During a contiguous read no skips are needed and the non-null information of
// the parent is available in the column vector for use during non-filter read
return result;
}
private int computeBatchSize(long targetBatchSize) {
final int batchSize;
    // In case of PPD, the batch size should be aware of row group boundaries. If only a subset of
    // row groups is selected, the marker position is set to the end of that range (the subset of
    // row groups within the stripe). Computing the batch size from the marker position makes sure
    // it respects row group boundaries and will not overflow when reading rows.
// illustration of this case is here https://issues.apache.org/jira/browse/HIVE-6287
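    // e.g. with rowIndexStride = 10000, rowInStripe = 9000 and only row group 0
    // selected, markerPosition is 10000 and the batch is capped at the 1000
    // remaining rows of that row group.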
if (rowIndexStride != 0 &&
(includedRowGroups != null || startReadPhase != TypeReader.ReadPhase.ALL) &&
rowInStripe < rowCountInStripe) {
int startRowGroup = (int) (rowInStripe / rowIndexStride);
if (includedRowGroups != null && !includedRowGroups[startRowGroup]) {
while (startRowGroup < includedRowGroups.length && !includedRowGroups[startRowGroup]) {
startRowGroup += 1;
}
}
int endRowGroup = startRowGroup;
      // We force row group boundaries when dealing with filters. We limit the end row group to
      // the next row group even if more than one could have been selected.
if (includedRowGroups != null && startReadPhase == TypeReader.ReadPhase.ALL) {
while (endRowGroup < includedRowGroups.length && includedRowGroups[endRowGroup]) {
endRowGroup += 1;
}
} else {
endRowGroup += 1;
}
final long markerPosition = Math.min((endRowGroup * rowIndexStride), rowCountInStripe);
batchSize = (int) Math.min(targetBatchSize, (markerPosition - rowInStripe));
if (isLogDebugEnabled && batchSize < targetBatchSize) {
LOG.debug("markerPosition: " + markerPosition + " batchSize: " + batchSize);
}
} else {
batchSize = (int) Math.min(targetBatchSize, (rowCountInStripe - rowInStripe));
}
return batchSize;
}
@Override
public void close() throws IOException {
clearStreams();
dataReader.close();
}
@Override
public long getRowNumber() {
return rowInStripe + rowBaseInStripe + firstRow;
}
/**
   * Return the fraction of rows that have been read from the selected
   * section of the file.
*
* @return fraction between 0.0 and 1.0 of rows consumed
*/
@Override
public float getProgress() {
return ((float) rowBaseInStripe + rowInStripe) / totalRowCount;
}
private int findStripe(long rowNumber) {
for (int i = 0; i < stripes.size(); i++) {
StripeInformation stripe = stripes.get(i);
if (stripe.getNumberOfRows() > rowNumber) {
return i;
}
rowNumber -= stripe.getNumberOfRows();
}
throw new IllegalArgumentException("Seek after the end of reader range");
}
private void readCurrentStripeRowIndex() throws IOException {
planner.readRowIndex(rowIndexColsToRead, indexes);
}
public OrcIndex readRowIndex(int stripeIndex,
boolean[] included,
boolean[] readCols) throws IOException {
// Use the cached objects if the read request matches the cached request
if (stripeIndex == currentStripe &&
(readCols == null || Arrays.equals(readCols, rowIndexColsToRead))) {
if (rowIndexColsToRead != null) {
return indexes;
} else {
return planner.readRowIndex(readCols, indexes);
}
} else {
StripePlanner copy = new StripePlanner(planner);
if (included == null) {
included = new boolean[schema.getMaximumId() + 1];
Arrays.fill(included, true);
}
copy.parseStripe(stripes.get(stripeIndex), included);
return copy.readRowIndex(readCols, null);
}
}
private void seekToRowEntry(BatchReader reader, int rowEntry, TypeReader.ReadPhase readPhase)
throws IOException {
OrcProto.RowIndex[] rowIndices = indexes.getRowGroupIndex();
PositionProvider[] index = new PositionProvider[rowIndices.length];
for (int i = 0; i < index.length; ++i) {
if (rowIndices[i] != null) {
OrcProto.RowIndexEntry entry = rowIndices[i].getEntry(rowEntry);
// This is effectively a test for pre-ORC-569 files.
if (rowEntry == 0 && entry.getPositionsCount() == 0) {
index[i] = new ZeroPositionProvider();
} else {
index[i] = new PositionProviderImpl(entry);
}
}
}
reader.seek(index, readPhase);
}
@Override
public void seekToRow(long rowNumber) throws IOException {
if (rowNumber < 0) {
throw new IllegalArgumentException("Seek to a negative row number " +
rowNumber);
} else if (rowNumber < firstRow) {
throw new IllegalArgumentException("Seek before reader range " +
rowNumber);
}
// convert to our internal form (rows from the beginning of slice)
rowNumber -= firstRow;
// move to the right stripe
int rightStripe = findStripe(rowNumber);
if (rightStripe != currentStripe) {
currentStripe = rightStripe;
readStripe();
}
if (rowIndexColsToRead == null) {
// Read the row indexes only if they were not already read as part of readStripe()
readCurrentStripeRowIndex();
}
// if we aren't to the right row yet, advance in the stripe.
advanceToNextRow(reader, rowNumber, true);
}
private static final String TRANSLATED_SARG_SEPARATOR = "_";
public static String encodeTranslatedSargColumn(int rootColumn, Integer indexInSourceTable) {
return rootColumn + TRANSLATED_SARG_SEPARATOR
+ ((indexInSourceTable == null) ? -1 : indexInSourceTable);
}
public static int[] mapTranslatedSargColumns(
List<OrcProto.Type> types, List<PredicateLeaf> sargLeaves) {
int[] result = new int[sargLeaves.size()];
OrcProto.Type lastRoot = null; // Root will be the same for everyone as of now.
String lastRootStr = null;
for (int i = 0; i < result.length; ++i) {
String[] rootAndIndex = sargLeaves.get(i).getColumnName().split(TRANSLATED_SARG_SEPARATOR);
assert rootAndIndex.length == 2;
String rootStr = rootAndIndex[0], indexStr = rootAndIndex[1];
int index = Integer.parseInt(indexStr);
// First, check if the column even maps to anything.
if (index == -1) {
result[i] = -1;
continue;
}
assert index >= 0;
// Then, find the root type if needed.
if (!rootStr.equals(lastRootStr)) {
lastRoot = types.get(Integer.parseInt(rootStr));
lastRootStr = rootStr;
}
// Subtypes of the root types correspond, in order, to the columns in the table schema
// (disregarding schema evolution that doesn't presently work). Get the index for the
// corresponding subtype.
result[i] = lastRoot.getSubtypes(index);
}
return result;
}
public CompressionCodec getCompressionCodec() {
return dataReader.getCompressionOptions().getCodec();
}
public int getMaxDiskRangeChunkLimit() {
return maxDiskRangeChunkLimit;
}
}
| 70,850 | 40.38493 | 102 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RecordReaderUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import org.apache.orc.CompressionCodec;
import org.apache.orc.DataReader;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Supplier;
/**
* Stateless methods shared between RecordReaderImpl and EncodedReaderImpl.
*/
public class RecordReaderUtils {
private static final HadoopShims SHIMS = HadoopShimsFactory.get();
private static final Logger LOG = LoggerFactory.getLogger(RecordReaderUtils.class);
private static class DefaultDataReader implements DataReader {
private FSDataInputStream file;
private ByteBufferAllocatorPool pool;
private HadoopShims.ZeroCopyReaderShim zcr = null;
private final Supplier<FileSystem> fileSystemSupplier;
private final Path path;
private final boolean useZeroCopy;
private final int minSeekSize;
private final double minSeekSizeTolerance;
private InStream.StreamOptions options;
private boolean isOpen = false;
private DefaultDataReader(DataReaderProperties properties) {
this.fileSystemSupplier = properties.getFileSystemSupplier();
this.path = properties.getPath();
this.file = properties.getFile();
this.useZeroCopy = properties.getZeroCopy();
this.options = properties.getCompression();
this.minSeekSize = properties.getMinSeekSize();
this.minSeekSizeTolerance = properties.getMinSeekSizeTolerance();
}
@Override
public void open() throws IOException {
if (file == null) {
this.file = fileSystemSupplier.get().open(path);
}
if (useZeroCopy) {
// ZCR only uses codec for boolean checks.
pool = new ByteBufferAllocatorPool();
zcr = RecordReaderUtils.createZeroCopyShim(file, options.getCodec(), pool);
} else {
zcr = null;
}
isOpen = true;
}
@Override
public OrcProto.StripeFooter readStripeFooter(StripeInformation stripe) throws IOException {
if (!isOpen) {
open();
}
long offset = stripe.getOffset() + stripe.getIndexLength() + stripe.getDataLength();
int tailLength = (int) stripe.getFooterLength();
// read the footer
ByteBuffer tailBuf = ByteBuffer.allocate(tailLength);
file.readFully(offset, tailBuf.array(), tailBuf.arrayOffset(), tailLength);
return OrcProto.StripeFooter.parseFrom(
InStream.createCodedInputStream(InStream.create("footer",
new BufferChunk(tailBuf, 0), 0, tailLength, options)));
}
@Override
public BufferChunkList readFileData(BufferChunkList range,
boolean doForceDirect
) throws IOException {
RecordReaderUtils.readDiskRanges(file, zcr, range, doForceDirect, minSeekSize,
minSeekSizeTolerance);
return range;
}
@Override
public void close() throws IOException {
if (options.getCodec() != null) {
OrcCodecPool.returnCodec(options.getCodec().getKind(), options.getCodec());
options.withCodec(null);
}
if (pool != null) {
pool.clear();
}
// close both zcr and file
try (HadoopShims.ZeroCopyReaderShim myZcr = zcr) {
if (file != null) {
file.close();
file = null;
}
}
}
@Override
public boolean isTrackingDiskRanges() {
return zcr != null;
}
@Override
public void releaseBuffer(ByteBuffer buffer) {
zcr.releaseBuffer(buffer);
}
@Override
public DataReader clone() {
if (this.file != null) {
// We should really throw here, but that will cause failures in Hive.
// While Hive uses clone, just log a warning.
LOG.warn("Cloning an opened DataReader; the stream will be reused and closed twice");
}
try {
DefaultDataReader clone = (DefaultDataReader) super.clone();
if (options.getCodec() != null) {
// Make sure we don't share the same codec between two readers.
clone.options = options.clone();
}
return clone;
} catch (CloneNotSupportedException e) {
throw new UnsupportedOperationException("uncloneable", e);
}
}
@Override
public InStream.StreamOptions getCompressionOptions() {
return options;
}
}
public static DataReader createDefaultDataReader(DataReaderProperties properties) {
return new DefaultDataReader(properties);
}
/**
* Does region A overlap region B? The end points are inclusive on both sides.
* @param leftA A's left point
* @param rightA A's right point
* @param leftB B's left point
* @param rightB B's right point
* @return Does region A overlap region B?
*/
static boolean overlap(long leftA, long rightA, long leftB, long rightB) {
if (leftA <= leftB) {
return rightA >= leftB;
}
return rightB >= leftA;
}
public static long estimateRgEndOffset(boolean isCompressed,
int bufferSize,
boolean isLast,
long nextGroupOffset,
long streamLength) {
// Figure out the worst case last location
long slop = WORST_UNCOMPRESSED_SLOP;
    // Stretch the slop by a factor to safely accommodate the following compression blocks.
    // We need to calculate the maximum number of blocks (stretchFactor) for the given bufferSize.
if (isCompressed) {
int stretchFactor = 2 + (MAX_VALUES_LENGTH * MAX_BYTE_WIDTH - 1) / bufferSize;
slop = stretchFactor * (OutStream.HEADER_SIZE + bufferSize);
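      // e.g. with a 256 KiB buffer, (MAX_VALUES_LENGTH * MAX_BYTE_WIDTH - 1) / bufferSize
      // is 0, so stretchFactor is 2 and the slop covers two compression blocks plus headers.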
}
return isLast ? streamLength : Math.min(streamLength, nextGroupOffset + slop);
}
private static final int BYTE_STREAM_POSITIONS = 1;
private static final int RUN_LENGTH_BYTE_POSITIONS = BYTE_STREAM_POSITIONS + 1;
private static final int BITFIELD_POSITIONS = RUN_LENGTH_BYTE_POSITIONS + 1;
private static final int RUN_LENGTH_INT_POSITIONS = BYTE_STREAM_POSITIONS + 1;
/**
   * Get the offset in the index positions for the column at which the given
   * stream starts.
* @param columnEncoding the encoding of the column
* @param columnType the type of the column
* @param streamType the kind of the stream
* @param isCompressed is the stream compressed?
* @param hasNulls does the column have a PRESENT stream?
   * @return the offset within the index positions at which the given stream starts
*/
public static int getIndexPosition(OrcProto.ColumnEncoding.Kind columnEncoding,
TypeDescription.Category columnType,
OrcProto.Stream.Kind streamType,
boolean isCompressed,
boolean hasNulls) {
if (streamType == OrcProto.Stream.Kind.PRESENT) {
return 0;
}
int compressionValue = isCompressed ? 1 : 0;
int base = hasNulls ? (BITFIELD_POSITIONS + compressionValue) : 0;
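    // e.g. a compressed BINARY column with nulls: PRESENT positions start at 0,
    // DATA starts at base = BITFIELD_POSITIONS + 1 = 4 and LENGTH starts at
    // base + BYTE_STREAM_POSITIONS + 1 = 6.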
switch (columnType) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
case FLOAT:
case DOUBLE:
case DATE:
case STRUCT:
case MAP:
case LIST:
case UNION:
return base;
case CHAR:
case VARCHAR:
case STRING:
if (columnEncoding == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
columnEncoding == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
return base;
} else {
if (streamType == OrcProto.Stream.Kind.DATA) {
return base;
} else {
return base + BYTE_STREAM_POSITIONS + compressionValue;
}
}
case BINARY:
case DECIMAL:
if (streamType == OrcProto.Stream.Kind.DATA) {
return base;
}
return base + BYTE_STREAM_POSITIONS + compressionValue;
case TIMESTAMP:
case TIMESTAMP_INSTANT:
if (streamType == OrcProto.Stream.Kind.DATA) {
return base;
}
return base + RUN_LENGTH_INT_POSITIONS + compressionValue;
default:
throw new IllegalArgumentException("Unknown type " + columnType);
}
}
// for uncompressed streams, what is the most overlap with the following set
// of rows (long vint literal group).
static final int WORST_UNCOMPRESSED_SLOP = 2 + 8 * 512;
// the maximum number of values that need to be consumed from the run
static final int MAX_VALUES_LENGTH = RunLengthIntegerWriterV2.MAX_SCOPE;
// the maximum byte width for each value
static final int MAX_BYTE_WIDTH =
SerializationUtils.decodeBitWidth(SerializationUtils.FixedBitSizes.SIXTYFOUR.ordinal()) / 8;
/**
* Is this stream part of a dictionary?
* @return is this part of a dictionary?
*/
public static boolean isDictionary(OrcProto.Stream.Kind kind,
OrcProto.ColumnEncoding encoding) {
assert kind != OrcProto.Stream.Kind.DICTIONARY_COUNT;
OrcProto.ColumnEncoding.Kind encodingKind = encoding.getKind();
return kind == OrcProto.Stream.Kind.DICTIONARY_DATA ||
(kind == OrcProto.Stream.Kind.LENGTH &&
(encodingKind == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
encodingKind == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2));
}
/**
* Build a string representation of a list of disk ranges.
* @param range ranges to stringify
* @return the resulting string
*/
public static String stringifyDiskRanges(DiskRangeList range) {
StringBuilder buffer = new StringBuilder();
buffer.append("[");
boolean isFirst = true;
while (range != null) {
if (!isFirst) {
buffer.append(", {");
} else {
buffer.append("{");
}
isFirst = false;
buffer.append(range);
buffer.append("}");
range = range.next;
}
buffer.append("]");
return buffer.toString();
}
static long computeEnd(BufferChunk first, BufferChunk last) {
long end = 0;
    for (BufferChunk ptr = first; ptr != last.next; ptr = (BufferChunk) ptr.next) {
end = Math.max(ptr.getEnd(), end);
}
return end;
}
/**
* Zero-copy read the data from the file based on a list of ranges in a
* single read.
*
* As a side note, the HDFS zero copy API really sucks from a user's point of
* view.
*
* @param file the file we're reading from
* @param zcr the zero copy shim
* @param first the first range to read
* @param last the last range to read
* @param allocateDirect if we need to allocate buffers, should we use direct
* @throws IOException
*/
static void zeroCopyReadRanges(FSDataInputStream file,
HadoopShims.ZeroCopyReaderShim zcr,
BufferChunk first,
BufferChunk last,
boolean allocateDirect) throws IOException {
// read all of the bytes that we need
final long offset = first.getOffset();
int length = (int)(computeEnd(first, last) - offset);
file.seek(offset);
List<ByteBuffer> bytes = new ArrayList<>();
while (length > 0) {
      ByteBuffer read = zcr.readBuffer(length, false);
bytes.add(read);
length -= read.remaining();
}
long currentOffset = offset;
// iterate and fill each range
BufferChunk current = first;
Iterator<ByteBuffer> buffers = bytes.iterator();
ByteBuffer currentBuffer = buffers.next();
while (current != last.next) {
// if we are past the start of the range, restart the iterator
if (current.getOffset() < offset) {
buffers = bytes.iterator();
currentBuffer = buffers.next();
currentOffset = offset;
}
// walk through the buffers to find the start of the buffer
while (currentOffset + currentBuffer.remaining() <= current.getOffset()) {
currentOffset += currentBuffer.remaining();
// We assume that buffers.hasNext is true because we know we read
// enough data to cover the last range.
currentBuffer = buffers.next();
}
// did we get the current range in a single read?
if (currentOffset + currentBuffer.remaining() >= current.getEnd()) {
ByteBuffer copy = currentBuffer.duplicate();
copy.position((int) (current.getOffset() - currentOffset));
copy.limit(copy.position() + current.getLength());
current.setChunk(copy);
} else {
// otherwise, build a single buffer that holds the entire range
ByteBuffer result = allocateDirect
? ByteBuffer.allocateDirect(current.getLength())
: ByteBuffer.allocate(current.getLength());
// we know that the range spans buffers
ByteBuffer copy = currentBuffer.duplicate();
// skip over the front matter
copy.position((int) (current.getOffset() - currentOffset));
result.put(copy);
// advance the buffer
currentOffset += currentBuffer.remaining();
currentBuffer = buffers.next();
while (result.hasRemaining()) {
if (result.remaining() > currentBuffer.remaining()) {
result.put(currentBuffer.duplicate());
currentOffset += currentBuffer.remaining();
currentBuffer = buffers.next();
} else {
copy = currentBuffer.duplicate();
copy.limit(result.remaining());
result.put(copy);
}
}
result.flip();
current.setChunk(result);
}
current = (BufferChunk) current.next;
}
}
/**
* Read the data from the file based on a list of ranges in a single read.
* @param file the file to read from
* @param first the first range to read
* @param last the last range to read
* @param allocateDirect should we use direct buffers
*/
static void readRanges(FSDataInputStream file,
BufferChunk first,
BufferChunk last,
boolean allocateDirect) throws IOException {
// assume that the chunks are sorted by offset
long offset = first.getOffset();
int readSize = (int) (computeEnd(first, last) - offset);
byte[] buffer = new byte[readSize];
try {
file.readFully(offset, buffer, 0, buffer.length);
    } catch (IOException e) {
throw new IOException(String.format("Failed while reading %s %d:%d",
file,
offset,
buffer.length),
e);
}
// get the data into a ByteBuffer
ByteBuffer bytes;
if (allocateDirect) {
bytes = ByteBuffer.allocateDirect(readSize);
bytes.put(buffer);
bytes.flip();
} else {
bytes = ByteBuffer.wrap(buffer);
}
// populate each BufferChunks with the data
BufferChunk current = first;
while (current != last.next) {
ByteBuffer currentBytes = current == last ? bytes : bytes.duplicate();
currentBytes.position((int) (current.getOffset() - offset));
currentBytes.limit((int) (current.getEnd() - offset));
current.setChunk(currentBytes);
current = (BufferChunk) current.next;
}
}
/**
* Find the list of ranges that should be read in a single read.
* The read will stop when there is a gap, one of the ranges already has data,
* or we have reached the maximum read size of 2^31.
* @param first the first range to read
* @return the last range to read
*/
static BufferChunk findSingleRead(BufferChunk first) {
return findSingleRead(first, 0);
}
/**
* Find the list of ranges that should be read in a single read.
* The read will stop when there is a gap, one of the ranges already has data,
* or we have reached the maximum read size of 2^31.
* @param first the first range to read
* @param minSeekSize minimum size for seek instead of read
* @return the last range to read
*/
private static BufferChunk findSingleRead(BufferChunk first, long minSeekSize) {
BufferChunk last = first;
long currentEnd = first.getEnd();
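    // Coalesce consecutive ranges into a single read as long as the gap between
    // them is at most minSeekSize, none of them already has data, and the total
    // read stays under 2GB.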
while (last.next != null &&
!last.next.hasData() &&
last.next.getOffset() <= (currentEnd + minSeekSize) &&
last.next.getEnd() - first.getOffset() < Integer.MAX_VALUE) {
last = (BufferChunk) last.next;
currentEnd = Math.max(currentEnd, last.getEnd());
}
return last;
}
/**
* Read the list of ranges from the file by updating each range in the list
* with a buffer that has the bytes from the file.
*
* The ranges must be sorted, but may overlap or include holes.
*
* @param file the file to read
* @param zcr the zero copy shim
* @param list the disk ranges within the file to read
* @param doForceDirect allocate direct buffers
*/
static void readDiskRanges(FSDataInputStream file,
HadoopShims.ZeroCopyReaderShim zcr,
BufferChunkList list,
boolean doForceDirect) throws IOException {
readDiskRanges(file, zcr, list, doForceDirect, 0, 0);
}
/**
* Read the list of ranges from the file by updating each range in the list
* with a buffer that has the bytes from the file.
*
* The ranges must be sorted, but may overlap or include holes.
*
* @param file the file to read
* @param zcr the zero copy shim
* @param list the disk ranges within the file to read
* @param doForceDirect allocate direct buffers
* @param minSeekSize the minimum gap to prefer seek vs read
* @param minSeekSizeTolerance allowed tolerance for extra bytes in memory as a result of
* minSeekSize
*/
private static void readDiskRanges(FSDataInputStream file,
HadoopShims.ZeroCopyReaderShim zcr,
BufferChunkList list,
boolean doForceDirect,
int minSeekSize,
double minSeekSizeTolerance) throws IOException {
BufferChunk current = list == null ? null : list.get();
while (current != null) {
while (current.hasData()) {
current = (BufferChunk) current.next;
}
if (zcr != null) {
BufferChunk last = findSingleRead(current);
zeroCopyReadRanges(file, zcr, current, last, doForceDirect);
current = (BufferChunk) last.next;
} else {
ChunkReader chunkReader = ChunkReader.create(current, minSeekSize);
chunkReader.readRanges(file, doForceDirect, minSeekSizeTolerance);
current = (BufferChunk) chunkReader.to.next;
}
}
}
static HadoopShims.ZeroCopyReaderShim createZeroCopyShim(FSDataInputStream file,
CompressionCodec codec, ByteBufferAllocatorPool pool) throws IOException {
if ((codec == null || ((codec instanceof DirectDecompressionCodec) &&
((DirectDecompressionCodec) codec).isAvailable()))) {
/* codec is null or is available */
return SHIMS.getZeroCopyReader(file, pool);
}
return null;
}
// this is an implementation copied from ElasticByteBufferPool in hadoop-2,
// which lacks a clear()/clean() operation
public static final class ByteBufferAllocatorPool implements HadoopShims.ByteBufferPoolShim {
private static final class Key implements Comparable<Key> {
private final int capacity;
private final long insertionGeneration;
Key(int capacity, long insertionGeneration) {
this.capacity = capacity;
this.insertionGeneration = insertionGeneration;
}
@Override
public int compareTo(Key other) {
final int c = Integer.compare(capacity, other.capacity);
return (c != 0) ? c : Long.compare(insertionGeneration, other.insertionGeneration);
}
@Override
public boolean equals(Object rhs) {
if (rhs instanceof Key) {
Key o = (Key) rhs;
return 0 == compareTo(o);
}
return false;
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(capacity).append(insertionGeneration)
.toHashCode();
}
}
private final TreeMap<Key, ByteBuffer> buffers = new TreeMap<>();
private final TreeMap<Key, ByteBuffer> directBuffers = new TreeMap<>();
private long currentGeneration = 0;
private TreeMap<Key, ByteBuffer> getBufferTree(boolean direct) {
return direct ? directBuffers : buffers;
}
public void clear() {
buffers.clear();
directBuffers.clear();
}
@Override
public ByteBuffer getBuffer(boolean direct, int length) {
TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
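      // The ceiling lookup returns the smallest pooled buffer whose capacity is at
      // least the requested length (ties broken by insertion generation); if the
      // pool has no such buffer, a fresh one is allocated below.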
Map.Entry<Key, ByteBuffer> entry = tree.ceilingEntry(new Key(length, 0));
if (entry == null) {
return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer
.allocate(length);
}
tree.remove(entry.getKey());
return entry.getValue();
}
@Override
public void putBuffer(ByteBuffer buffer) {
TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
Key key;
// Buffers are indexed by (capacity, generation).
// If our key is not unique on the first try, try again
do {
key = new Key(buffer.capacity(), currentGeneration++);
} while (tree.putIfAbsent(key, buffer) != null);
}
}
static class ChunkReader {
private final BufferChunk from;
private final BufferChunk to;
private final int readBytes;
private final int reqBytes;
private ChunkReader(BufferChunk from, BufferChunk to, int readSize, int reqBytes) {
this.from = from;
this.to = to;
this.readBytes = readSize;
this.reqBytes = reqBytes;
}
double getExtraBytesFraction() {
return (readBytes - reqBytes) / ((double) reqBytes);
}
public int getReadBytes() {
return readBytes;
}
public int getReqBytes() {
return reqBytes;
}
public BufferChunk getFrom() {
return from;
}
public BufferChunk getTo() {
return to;
}
void populateChunks(ByteBuffer bytes, boolean allocateDirect, double extraByteTolerance) {
if (getExtraBytesFraction() > extraByteTolerance) {
LOG.debug("ExtraBytesFraction = {}, ExtraByteTolerance = {}, reducing memory size",
getExtraBytesFraction(),
extraByteTolerance);
populateChunksReduceSize(bytes, allocateDirect);
} else {
LOG.debug("ExtraBytesFraction = {}, ExtraByteTolerance = {}, populating as is",
getExtraBytesFraction(),
extraByteTolerance);
populateChunksAsIs(bytes);
}
}
void populateChunksAsIs(ByteBuffer bytes) {
// populate each BufferChunks with the data
BufferChunk current = from;
long offset = from.getOffset();
while (current != to.next) {
ByteBuffer currentBytes = current == to ? bytes : bytes.duplicate();
currentBytes.position((int) (current.getOffset() - offset));
currentBytes.limit((int) (current.getEnd() - offset));
current.setChunk(currentBytes);
current = (BufferChunk) current.next;
}
}
void populateChunksReduceSize(ByteBuffer bytes, boolean allocateDirect) {
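      // Copy only the byte ranges that the chunks actually cover into a tighter
      // buffer of reqBytes bytes, dropping the gap bytes that were read purely to
      // avoid extra seeks, and re-point every chunk at a slice of that buffer.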
ByteBuffer newBuffer;
if (allocateDirect) {
newBuffer = ByteBuffer.allocateDirect(reqBytes);
newBuffer.position(reqBytes);
newBuffer.flip();
} else {
byte[] newBytes = new byte[reqBytes];
newBuffer = ByteBuffer.wrap(newBytes);
}
final long offset = from.getOffset();
int copyStart = 0;
int copyEnd;
int copyLength;
int skippedBytes = 0;
int srcPosition;
BufferChunk current = from;
while (current != to.next) {
// We can skip bytes as required, but no need to copy bytes that are already copied
srcPosition = (int) (current.getOffset() - offset);
skippedBytes += Math.max(0, srcPosition - copyStart);
copyStart = Math.max(copyStart, srcPosition);
copyEnd = (int) (current.getEnd() - offset);
copyLength = copyStart < copyEnd ? copyEnd - copyStart : 0;
newBuffer.put(bytes.array(), copyStart, copyLength);
copyStart += copyLength;
// Set up new ByteBuffer that wraps on the same backing array
ByteBuffer currentBytes = current == to ? newBuffer : newBuffer.duplicate();
currentBytes.position(srcPosition - skippedBytes);
currentBytes.limit(currentBytes.position() + current.getLength());
current.setChunk(currentBytes);
current = (BufferChunk) current.next;
}
}
/**
* Read the data from the file based on a list of ranges in a single read.
*
* @param file the file to read from
* @param allocateDirect should we use direct buffers
*/
void readRanges(FSDataInputStream file, boolean allocateDirect, double extraByteTolerance)
throws IOException {
// assume that the chunks are sorted by offset
long offset = from.getOffset();
int readSize = (int) (computeEnd(from, to) - offset);
byte[] buffer = new byte[readSize];
try {
file.readFully(offset, buffer, 0, buffer.length);
} catch (IOException e) {
throw new IOException(String.format("Failed while reading %s %d:%d",
file,
offset,
buffer.length),
e);
}
// get the data into a ByteBuffer
ByteBuffer bytes;
if (allocateDirect) {
bytes = ByteBuffer.allocateDirect(readSize);
bytes.put(buffer);
bytes.flip();
} else {
bytes = ByteBuffer.wrap(buffer);
}
// populate each BufferChunks with the data
populateChunks(bytes, allocateDirect, extraByteTolerance);
}
static ChunkReader create(BufferChunk from, BufferChunk to) {
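      // readBytes is the full span from the smallest offset to the largest end (what
      // one contiguous read must cover), while reqBytes sums only the merged chunk
      // ranges, so readBytes - reqBytes is the number of wasted gap bytes.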
long f = Integer.MAX_VALUE;
long e = Integer.MIN_VALUE;
long cf = Integer.MAX_VALUE;
long ef = Integer.MIN_VALUE;
int reqBytes = 0;
BufferChunk current = from;
while (current != to.next) {
f = Math.min(f, current.getOffset());
e = Math.max(e, current.getEnd());
if (ef == Integer.MIN_VALUE || current.getOffset() <= ef) {
cf = Math.min(cf, current.getOffset());
ef = Math.max(ef, current.getEnd());
} else {
reqBytes += ef - cf;
cf = current.getOffset();
ef = current.getEnd();
}
current = (BufferChunk) current.next;
}
reqBytes += ef - cf;
return new ChunkReader(from, to, (int) (e - f), reqBytes);
}
static ChunkReader create(BufferChunk from, int minSeekSize) {
BufferChunk to = findSingleRead(from, minSeekSize);
return create(from, to);
}
}
}
| 28,528 | 34.93073 | 98 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RedBlackTree.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
/**
* A memory efficient red-black tree that does not allocate any objects per
* an element. This class is abstract and assumes that the child class
* handles the key and comparisons with the key.
*/
abstract class RedBlackTree {
public static final int NULL = -1;
// Various values controlling the offset of the data within the array.
private static final int LEFT_OFFSET = 0;
private static final int RIGHT_OFFSET = 1;
private static final int ELEMENT_SIZE = 2;
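  // Each node occupies ELEMENT_SIZE consecutive ints in the data array: the slot at
  // LEFT_OFFSET stores (left child index << 1) | color bit and the slot at
  // RIGHT_OFFSET stores the right child index; the key itself is kept by the
  // subclass (see compareValue). For example, node 5's fields live at indexes 10
  // and 11.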
protected int size = 0;
private final DynamicIntArray data;
protected int root = NULL;
protected int lastAdd = 0;
private boolean wasAdd = false;
/**
* Create a set with the given initial capacity.
*/
RedBlackTree(int initialCapacity) {
data = new DynamicIntArray(initialCapacity * ELEMENT_SIZE);
}
/**
* Insert a new node into the data array, growing the array as necessary.
*
* @return Returns the position of the new node.
*/
private int insert(int left, int right, boolean isRed) {
int position = size;
size += 1;
setLeft(position, left, isRed);
setRight(position, right);
return position;
}
/**
* Compare the value at the given position to the new value.
* @return 0 if the values are the same, -1 if the new value is smaller and
* 1 if the new value is larger.
*/
protected abstract int compareValue(int position);
/**
* Is the given node red as opposed to black? To prevent having an extra word
   * in the data array, we just use the low bit of the left child index.
*/
protected boolean isRed(int position) {
return position != NULL &&
(data.get(position * ELEMENT_SIZE + LEFT_OFFSET) & 1) == 1;
}
/**
* Set the red bit true or false.
*/
private void setRed(int position, boolean isRed) {
int offset = position * ELEMENT_SIZE + LEFT_OFFSET;
if (isRed) {
data.set(offset, data.get(offset) | 1);
} else {
data.set(offset, data.get(offset) & ~1);
}
}
/**
* Get the left field of the given position.
*/
protected int getLeft(int position) {
return data.get(position * ELEMENT_SIZE + LEFT_OFFSET) >> 1;
}
/**
* Get the right field of the given position.
*/
protected int getRight(int position) {
return data.get(position * ELEMENT_SIZE + RIGHT_OFFSET);
}
/**
* Set the left field of the given position.
* Note that we are storing the node color in the low bit of the left pointer.
*/
private void setLeft(int position, int left) {
int offset = position * ELEMENT_SIZE + LEFT_OFFSET;
data.set(offset, (left << 1) | (data.get(offset) & 1));
}
/**
* Set the left field of the given position.
* Note that we are storing the node color in the low bit of the left pointer.
*/
private void setLeft(int position, int left, boolean isRed) {
int offset = position * ELEMENT_SIZE + LEFT_OFFSET;
data.set(offset, (left << 1) | (isRed ? 1 : 0));
}
/**
* Set the right field of the given position.
*/
private void setRight(int position, int right) {
data.set(position * ELEMENT_SIZE + RIGHT_OFFSET, right);
}
/**
* Insert or find a given key in the tree and rebalance the tree correctly.
* Rebalancing restores the red-black aspect of the tree to maintain the
* invariants:
* 1. If a node is red, both of its children are black.
* 2. Each child of a node has the same black height (the number of black
* nodes between it and the leaves of the tree).
*
* Inserted nodes are at the leaves and are red, therefore there is at most a
* violation of rule 1 at the node we just put in. Instead of always keeping
   * the parents, this routine passes down the context.
*
   * The fix is broken down into 6 cases (1.{1,2,3} and 2.{1,2,3}) that are
   * left-right mirror images of each other. See Algorithms by Cormen,
* Leiserson, and Rivest for the explanation of the subcases.
*
* @param node The node that we are fixing right now.
* @param fromLeft Did we come down from the left?
   * @param parent Node's parent
* @param grandparent Parent's parent
* @param greatGrandparent Grandparent's parent
* @return Does parent also need to be checked and/or fixed?
*/
private boolean add(int node, boolean fromLeft, int parent,
int grandparent, int greatGrandparent) {
if (node == NULL) {
if (root == NULL) {
lastAdd = insert(NULL, NULL, false);
root = lastAdd;
wasAdd = true;
return false;
} else {
lastAdd = insert(NULL, NULL, true);
node = lastAdd;
wasAdd = true;
// connect the new node into the tree
if (fromLeft) {
setLeft(parent, node);
} else {
setRight(parent, node);
}
}
} else {
int compare = compareValue(node);
boolean keepGoing;
// Recurse down to find where the node needs to be added
if (compare < 0) {
keepGoing = add(getLeft(node), true, node, parent, grandparent);
} else if (compare > 0) {
keepGoing = add(getRight(node), false, node, parent, grandparent);
} else {
lastAdd = node;
wasAdd = false;
return false;
}
// we don't need to fix the root (because it is always set to black)
if (node == root || !keepGoing) {
return false;
}
}
// Do we need to fix this node? Only if there are two reds right under each
// other.
if (isRed(node) && isRed(parent)) {
if (parent == getLeft(grandparent)) {
int uncle = getRight(grandparent);
if (isRed(uncle)) {
// case 1.1
setRed(parent, false);
setRed(uncle, false);
setRed(grandparent, true);
return true;
} else {
if (node == getRight(parent)) {
// case 1.2
// swap node and parent
int tmp = node;
node = parent;
parent = tmp;
// left-rotate on node
setLeft(grandparent, parent);
setRight(node, getLeft(parent));
setLeft(parent, node);
}
// case 1.2 and 1.3
setRed(parent, false);
setRed(grandparent, true);
// right-rotate on grandparent
if (greatGrandparent == NULL) {
root = parent;
} else if (getLeft(greatGrandparent) == grandparent) {
setLeft(greatGrandparent, parent);
} else {
setRight(greatGrandparent, parent);
}
setLeft(grandparent, getRight(parent));
setRight(parent, grandparent);
return false;
}
} else {
int uncle = getLeft(grandparent);
if (isRed(uncle)) {
// case 2.1
setRed(parent, false);
setRed(uncle, false);
setRed(grandparent, true);
return true;
} else {
if (node == getLeft(parent)) {
// case 2.2
// swap node and parent
int tmp = node;
node = parent;
parent = tmp;
// right-rotate on node
setRight(grandparent, parent);
setLeft(node, getRight(parent));
setRight(parent, node);
}
// case 2.2 and 2.3
setRed(parent, false);
setRed(grandparent, true);
// left-rotate on grandparent
if (greatGrandparent == NULL) {
root = parent;
} else if (getRight(greatGrandparent) == grandparent) {
setRight(greatGrandparent, parent);
} else {
setLeft(greatGrandparent, parent);
}
setRight(grandparent, getLeft(parent));
setLeft(parent, grandparent);
return false;
}
}
} else {
return true;
}
}
/**
* Add the new key to the tree.
* @return true if the element is a new one.
*/
protected boolean add() {
add(root, false, NULL, NULL, NULL);
if (wasAdd) {
setRed(root, false);
return true;
} else {
return false;
}
}
/**
* Get the number of elements in the set.
*/
public int size() {
return size;
}
/**
* Reset the table to empty.
*/
public void clear() {
root = NULL;
size = 0;
data.clear();
}
/**
* Get the buffer size in bytes.
*/
public long getSizeInBytes() {
return data.getSizeInBytes();
}
}
| 9,326 | 29.087097 | 80 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RunLengthByteReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.io.IOUtils;
import java.io.EOFException;
import java.io.IOException;
/**
* A reader that reads a sequence of bytes. A control byte is read before
* each run with positive values 0 to 127 meaning 3 to 130 repetitions. If the
* byte is -1 to -128, 1 to 128 literal byte values follow.
*/
public class RunLengthByteReader {
private InStream input;
private final byte[] literals =
new byte[RunLengthByteWriter.MAX_LITERAL_SIZE];
private int numLiterals = 0;
private int used = 0;
private boolean repeat = false;
public RunLengthByteReader(InStream input) {
this.input = input;
}
public void setInStream(InStream input) {
this.input = input;
}
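  // A worked example of the control byte handling in readValues() below:
  //   control 0x03 -> repeated run of 0x03 + MIN_REPEAT_SIZE = 6 copies of the next byte
  //   control 0xFB -> 0x100 - 0xFB = 5 literal byte values follow
  //   control -1   -> the underlying stream is exhausted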
private void readValues(boolean ignoreEof, int numSkipRows) throws IOException {
int control = input.read();
used = 0;
if (control == -1) {
if (!ignoreEof) {
throw new EOFException("Read past end of buffer RLE byte from " + input);
}
used = numLiterals = 0;
} else if (control < 0x80) {
repeat = true;
numLiterals = control + RunLengthByteWriter.MIN_REPEAT_SIZE;
if (numSkipRows >= numLiterals) {
        IOUtils.skipFully(input, 1);
} else {
int val = input.read();
if (val == -1) {
throw new EOFException("Reading RLE byte got EOF");
}
literals[0] = (byte) val;
}
} else {
repeat = false;
numLiterals = 0x100 - control;
numSkipRows = Math.min(numSkipRows, numLiterals);
if (numSkipRows > 0) {
IOUtils.skipFully(input, numSkipRows);
}
int bytes = numSkipRows;
while (bytes < numLiterals) {
int result = input.read(literals, bytes, numLiterals - bytes);
if (result == -1) {
throw new EOFException("Reading RLE byte literal got EOF in " + this);
}
bytes += result;
}
}
}
public boolean hasNext() throws IOException {
return used != numLiterals || input.available() > 0;
}
public byte next() throws IOException {
byte result;
if (used == numLiterals) {
readValues(false, 0);
}
if (repeat) {
result = literals[0];
} else {
result = literals[used];
}
++used;
return result;
}
public void nextVector(ColumnVector previous, long[] data, long size)
throws IOException {
previous.isRepeating = true;
for (int i = 0; i < size; i++) {
if (!previous.isNull[i]) {
data[i] = next();
} else {
// The default value of null for int types in vectorized
// processing is 1, so set that if the value is null
data[i] = 1;
}
// The default value for nulls in Vectorization for int types is 1
// and given that non null value can also be 1, we need to check for isNull also
// when determining the isRepeating flag.
if (previous.isRepeating && i > 0 &&
((data[0] != data[i]) || (previous.isNull[0] != previous.isNull[i]))) {
previous.isRepeating = false;
}
}
}
/**
* Read the next size bytes into the data array, skipping over any slots
* where isNull is true.
* @param isNull if non-null, skip any rows where isNull[r] is true
* @param data the array to read into
* @param size the number of elements to read
* @throws IOException
*/
public void nextVector(boolean[] isNull, int[] data,
long size) throws IOException {
if (isNull == null) {
for(int i=0; i < size; ++i) {
data[i] = next();
}
} else {
for(int i=0; i < size; ++i) {
if (!isNull[i]) {
data[i] = next();
}
}
}
}
public void seek(PositionProvider index) throws IOException {
input.seek(index);
int consumed = (int) index.getNext();
if (consumed != 0) {
// a loop is required for cases where we break the run into two parts
while (consumed > 0) {
readValues(false, 0);
used = consumed;
consumed -= numLiterals;
}
} else {
used = 0;
numLiterals = 0;
}
}
public void skip(long items) throws IOException {
while (items > 0) {
if (used == numLiterals) {
readValues(false, (int) items);
}
long consume = Math.min(items, numLiterals - used);
used += consume;
items -= consume;
}
}
@Override
public String toString() {
return "byte rle " + (repeat ? "repeat" : "literal") + " used: " +
used + "/" + numLiterals + " from " + input;
}
}
| 5,411 | 29.066667 | 86 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RunLengthByteWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.IOException;
import java.util.function.Consumer;
/**
* A streamFactory that writes a sequence of bytes. A control byte is written before
 * each run with positive values 0 to 127 meaning 3 to 130 repetitions. If the
 * byte is -1 to -128, 1 to 128 literal byte values follow.
*/
public class RunLengthByteWriter {
static final int MIN_REPEAT_SIZE = 3;
static final int MAX_LITERAL_SIZE = 128;
static final int MAX_REPEAT_SIZE= 127 + MIN_REPEAT_SIZE;
private final PositionedOutputStream output;
private final byte[] literals = new byte[MAX_LITERAL_SIZE];
private int numLiterals = 0;
private boolean repeat = false;
private int tailRunLength = 0;
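  // A worked example of the output produced by write()/writeValues() below:
  //   writing 0x07 six times and flushing emits 0x03 0x07 (control 6 - MIN_REPEAT_SIZE,
  //   then the repeated value), while writing 0x01 0x02 0x03 and flushing emits
  //   0xFD 0x01 0x02 0x03 (control -3, then the three literals).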
public RunLengthByteWriter(PositionedOutputStream output) {
this.output = output;
}
private void writeValues() throws IOException {
if (numLiterals != 0) {
if (repeat) {
output.write(numLiterals - MIN_REPEAT_SIZE);
output.write(literals, 0, 1);
} else {
output.write(-numLiterals);
output.write(literals, 0, numLiterals);
}
repeat = false;
tailRunLength = 0;
numLiterals = 0;
}
}
public void flush() throws IOException {
writeValues();
output.flush();
}
public void write(byte value) throws IOException {
if (numLiterals == 0) {
literals[numLiterals++] = value;
tailRunLength = 1;
} else if (repeat) {
if (value == literals[0]) {
numLiterals += 1;
if (numLiterals == MAX_REPEAT_SIZE) {
writeValues();
}
} else {
writeValues();
literals[numLiterals++] = value;
tailRunLength = 1;
}
} else {
if (value == literals[numLiterals - 1]) {
tailRunLength += 1;
} else {
tailRunLength = 1;
}
if (tailRunLength == MIN_REPEAT_SIZE) {
if (numLiterals + 1 == MIN_REPEAT_SIZE) {
repeat = true;
numLiterals += 1;
} else {
numLiterals -= MIN_REPEAT_SIZE - 1;
writeValues();
literals[0] = value;
repeat = true;
numLiterals = MIN_REPEAT_SIZE;
}
} else {
literals[numLiterals++] = value;
if (numLiterals == MAX_LITERAL_SIZE) {
writeValues();
}
}
}
}
public void getPosition(PositionRecorder recorder) throws IOException {
output.getPosition(recorder);
recorder.addPosition(numLiterals);
}
public long estimateMemory() {
return output.getBufferSize() + MAX_LITERAL_SIZE;
}
public void changeIv(Consumer<byte[]> modifier) {
output.changeIv(modifier);
}
}
| 3,474 | 28.956897 | 84 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import java.io.EOFException;
import java.io.IOException;
/**
* A reader that reads a sequence of integers.
 */
public class RunLengthIntegerReader implements IntegerReader {
private InStream input;
private final boolean signed;
private final long[] literals =
new long[RunLengthIntegerWriter.MAX_LITERAL_SIZE];
private int numLiterals = 0;
private int delta = 0;
private int used = 0;
private boolean repeat = false;
public RunLengthIntegerReader(InStream input, boolean signed) throws IOException {
this.input = input;
this.signed = signed;
}
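  // A worked example of the header handling in readValues() below:
  //   control 0x01 -> run of 0x01 + RunLengthIntegerWriter.MIN_REPEAT_SIZE (3) = 4 values;
  //                   the next byte is the signed per-step delta and a vint base follows,
  //                   giving base, base + delta, base + 2*delta, base + 3*delta
  //   control 0xFE -> 0x100 - 0xFE = 2 literal vint values follow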
private void readValues(boolean ignoreEof) throws IOException {
int control = input.read();
if (control == -1) {
if (!ignoreEof) {
throw new EOFException("Read past end of RLE integer from " + input);
}
used = numLiterals = 0;
return;
} else if (control < 0x80) {
numLiterals = control + RunLengthIntegerWriter.MIN_REPEAT_SIZE;
used = 0;
repeat = true;
delta = input.read();
if (delta == -1) {
throw new EOFException("End of stream in RLE Integer from " + input);
}
// convert from 0 to 255 to -128 to 127 by converting to a signed byte
delta = (byte) (0 + delta);
if (signed) {
literals[0] = SerializationUtils.readVslong(input);
} else {
literals[0] = SerializationUtils.readVulong(input);
}
} else {
repeat = false;
numLiterals = 0x100 - control;
used = 0;
for(int i=0; i < numLiterals; ++i) {
if (signed) {
literals[i] = SerializationUtils.readVslong(input);
} else {
literals[i] = SerializationUtils.readVulong(input);
}
}
}
}
@Override
public boolean hasNext() throws IOException {
return used != numLiterals || input.available() > 0;
}
@Override
public long next() throws IOException {
long result;
if (used == numLiterals) {
readValues(false);
}
if (repeat) {
result = literals[0] + (used++) * delta;
} else {
result = literals[used++];
}
return result;
}
@Override
public void nextVector(ColumnVector previous,
long[] data,
int previousLen) throws IOException {
previous.isRepeating = true;
for (int i = 0; i < previousLen; i++) {
if (!previous.isNull[i]) {
data[i] = next();
} else {
// The default value of null for int type in vectorized
// processing is 1, so set that if the value is null
data[i] = 1;
}
// The default value for nulls in Vectorization for int types is 1
// and given that non null value can also be 1, we need to check for isNull also
// when determining the isRepeating flag.
if (previous.isRepeating && i > 0 &&
(data[0] != data[i] || previous.isNull[0] != previous.isNull[i])) {
previous.isRepeating = false;
}
}
}
@Override
public void nextVector(ColumnVector vector,
int[] data,
int size) throws IOException {
if (vector.noNulls) {
for(int r=0; r < data.length && r < size; ++r) {
data[r] = (int) next();
}
} else if (!(vector.isRepeating && vector.isNull[0])) {
for(int r=0; r < data.length && r < size; ++r) {
if (!vector.isNull[r]) {
data[r] = (int) next();
} else {
data[r] = 1;
}
}
}
}
@Override
public void seek(PositionProvider index) throws IOException {
input.seek(index);
int consumed = (int) index.getNext();
if (consumed != 0) {
// a loop is required for cases where we break the run into two parts
while (consumed > 0) {
readValues(false);
used = consumed;
consumed -= numLiterals;
}
} else {
used = 0;
numLiterals = 0;
}
}
@Override
public void skip(long numValues) throws IOException {
while (numValues > 0) {
if (used == numLiterals) {
readValues(false);
}
long consume = Math.min(numValues, numLiterals - used);
used += consume;
numValues -= consume;
}
}
}
| 5,119 | 28.94152 | 86 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.util.Arrays;
/**
 * A reader that reads a sequence of lightweight compressed integers. Refer to
 * {@link RunLengthIntegerWriterV2} for a description of the various lightweight
 * compression techniques.
*/
public class RunLengthIntegerReaderV2 implements IntegerReader {
public static final Logger LOG = LoggerFactory.getLogger(RunLengthIntegerReaderV2.class);
private InStream input;
private final boolean signed;
private final long[] literals = new long[RunLengthIntegerWriterV2.MAX_SCOPE];
private int numLiterals = 0;
private int used = 0;
private final boolean skipCorrupt;
private final SerializationUtils utils;
private RunLengthIntegerWriterV2.EncodingType currentEncoding;
public RunLengthIntegerReaderV2(InStream input, boolean signed,
boolean skipCorrupt) throws IOException {
this.input = input;
this.signed = signed;
this.skipCorrupt = skipCorrupt;
this.utils = new SerializationUtils();
}
private static final RunLengthIntegerWriterV2.EncodingType[] encodings =
RunLengthIntegerWriterV2.EncodingType.values();
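  // The two most significant bits of the first header byte select the encoding
  // (00 SHORT_REPEAT, 01 DIRECT, 10 PATCHED_BASE, 11 DELTA), matching the opcode
  // written by RunLengthIntegerWriterV2.getOpcode().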
private void readValues(boolean ignoreEof) throws IOException {
// read the first 2 bits and determine the encoding type
int firstByte = input.read();
if (firstByte < 0) {
if (!ignoreEof) {
throw new EOFException("Read past end of RLE integer from " + input);
}
used = numLiterals = 0;
return;
}
currentEncoding = encodings[(firstByte >>> 6) & 0x03];
switch (currentEncoding) {
case SHORT_REPEAT:
readShortRepeatValues(firstByte);
break;
case DIRECT:
readDirectValues(firstByte);
break;
case PATCHED_BASE:
readPatchedBaseValues(firstByte);
break;
case DELTA:
readDeltaValues(firstByte);
break;
default:
throw new IOException("Unknown encoding " + currentEncoding);
}
}
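  // DELTA header layout as decoded below: bits 7..6 hold the opcode, bits 5..1 hold
  // the encoded bit width of the delta blob (0 means all deltas are equal and a single
  // fixed delta is stored as a vint after the first value), and bit 0 concatenated
  // with the next byte gives how many values follow the first value, which is itself
  // stored as a vint.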
private void readDeltaValues(int firstByte) throws IOException {
// extract the number of fixed bits
int fb = (firstByte >>> 1) & 0x1f;
if (fb != 0) {
fb = utils.decodeBitWidth(fb);
}
// extract the blob run length
int len = (firstByte & 0x01) << 8;
len |= input.read();
// read the first value stored as vint
long firstVal = 0;
if (signed) {
firstVal = SerializationUtils.readVslong(input);
} else {
firstVal = SerializationUtils.readVulong(input);
}
// store first value to result buffer
long prevVal = firstVal;
literals[numLiterals++] = firstVal;
// if fixed bits is 0 then all values have fixed delta
if (fb == 0) {
// read the fixed delta value stored as vint (deltas can be negative even
// if all number are positive)
long fd = SerializationUtils.readVslong(input);
if (fd == 0) {
assert numLiterals == 1;
Arrays.fill(literals, numLiterals, numLiterals + len, literals[0]);
numLiterals += len;
} else {
// add fixed deltas to adjacent values
for(int i = 0; i < len; i++) {
literals[numLiterals++] = literals[numLiterals - 2] + fd;
}
}
} else {
long deltaBase = SerializationUtils.readVslong(input);
// add delta base and first value
literals[numLiterals++] = firstVal + deltaBase;
prevVal = literals[numLiterals - 1];
len -= 1;
// write the unpacked values, add it to previous value and store final
// value to result buffer. if the delta base value is negative then it
// is a decreasing sequence else an increasing sequence
utils.readInts(literals, numLiterals, len, fb, input);
while (len > 0) {
if (deltaBase < 0) {
literals[numLiterals] = prevVal - literals[numLiterals];
} else {
literals[numLiterals] = prevVal + literals[numLiterals];
}
prevVal = literals[numLiterals];
len--;
numLiterals++;
}
}
}
private void readPatchedBaseValues(int firstByte) throws IOException {
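    // Header layout (4 bytes) as decoded below: [opcode | data bit width | run length
    // MSB], [run length low 8 bits], [base byte count | patch width], [patch gap
    // width | patch list length]; the base value, the bit packed data blob and the
    // bit packed patch blob follow.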
// extract the number of fixed bits
int fbo = (firstByte >>> 1) & 0x1f;
int fb = utils.decodeBitWidth(fbo);
// extract the run length of data blob
int len = (firstByte & 0x01) << 8;
len |= input.read();
// runs are always one off
len += 1;
// extract the number of bytes occupied by base
int thirdByte = input.read();
int bw = (thirdByte >>> 5) & 0x07;
// base width is one off
bw += 1;
// extract patch width
int pwo = thirdByte & 0x1f;
int pw = utils.decodeBitWidth(pwo);
// read fourth byte and extract patch gap width
int fourthByte = input.read();
int pgw = (fourthByte >>> 5) & 0x07;
// patch gap width is one off
pgw += 1;
// extract the length of the patch list
int pl = fourthByte & 0x1f;
// read the next base width number of bytes to extract base value
long base = utils.bytesToLongBE(input, bw);
long mask = (1L << ((bw * 8) - 1));
// if MSB of base value is 1 then base is negative value else positive
if ((base & mask) != 0) {
base = base & ~mask;
base = -base;
}
// unpack the data blob
long[] unpacked = new long[len];
utils.readInts(unpacked, 0, len, fb, input);
// unpack the patch blob
long[] unpackedPatch = new long[pl];
if ((pw + pgw) > 64 && !skipCorrupt) {
throw new IOException("Corruption in ORC data encountered. To skip" +
" reading corrupted data, set hive.exec.orc.skip.corrupt.data to" +
" true");
}
int bitSize = utils.getClosestFixedBits(pw + pgw);
utils.readInts(unpackedPatch, 0, pl, bitSize, input);
// apply the patch directly when decoding the packed data
int patchIdx = 0;
long currGap = 0;
long currPatch = 0;
long patchMask = ((1L << pw) - 1);
currGap = unpackedPatch[patchIdx] >>> pw;
currPatch = unpackedPatch[patchIdx] & patchMask;
long actualGap = 0;
// special case: gap is >255 then patch value will be 0.
// if gap is <=255 then patch value cannot be 0
while (currGap == 255 && currPatch == 0) {
actualGap += 255;
patchIdx++;
currGap = unpackedPatch[patchIdx] >>> pw;
currPatch = unpackedPatch[patchIdx] & patchMask;
}
// add the left over gap
actualGap += currGap;
// unpack data blob, patch it (if required), add base to get final result
for(int i = 0; i < unpacked.length; i++) {
if (i == actualGap) {
// extract the patch value
long patchedVal = unpacked[i] | (currPatch << fb);
// add base to patched value
literals[numLiterals++] = base + patchedVal;
// increment the patch to point to next entry in patch list
patchIdx++;
if (patchIdx < pl) {
// read the next gap and patch
currGap = unpackedPatch[patchIdx] >>> pw;
currPatch = unpackedPatch[patchIdx] & patchMask;
actualGap = 0;
// special case: gap is >255 then patch will be 0. if gap is
// <=255 then patch cannot be 0
while (currGap == 255 && currPatch == 0) {
actualGap += 255;
patchIdx++;
currGap = unpackedPatch[patchIdx] >>> pw;
currPatch = unpackedPatch[patchIdx] & patchMask;
}
// add the left over gap
actualGap += currGap;
// next gap is relative to the current gap
actualGap += i;
}
} else {
// no patching required. add base to unpacked value to get final value
literals[numLiterals++] = base + unpacked[i];
}
}
}
private void readDirectValues(int firstByte) throws IOException {
// extract the number of fixed bits
int fbo = (firstByte >>> 1) & 0x1f;
int fb = utils.decodeBitWidth(fbo);
// extract the run length
int len = (firstByte & 0x01) << 8;
len |= input.read();
// runs are one off
len += 1;
// write the unpacked values and zigzag decode to result buffer
utils.readInts(literals, numLiterals, len, fb, input);
if (signed) {
for(int i = 0; i < len; i++) {
literals[numLiterals] = utils.zigzagDecode(literals[numLiterals]);
numLiterals++;
}
} else {
numLiterals += len;
}
}
private void readShortRepeatValues(int firstByte) throws IOException {
// read the number of bytes occupied by the value
int size = (firstByte >>> 3) & 0x07;
// #bytes are one off
size += 1;
// read the run length
int len = firstByte & 0x07;
// run lengths values are stored only after MIN_REPEAT value is met
len += RunLengthIntegerWriterV2.MIN_REPEAT;
// read the repeated value which is store using fixed bytes
long val = utils.bytesToLongBE(input, size);
if (signed) {
val = utils.zigzagDecode(val);
}
if (numLiterals != 0) {
// Currently this always holds, which makes peekNextAvailLength simpler.
// If this changes, peekNextAvailLength should be adjusted accordingly.
throw new AssertionError("readValues called with existing values present");
}
// repeat the value for length times
// TODO: this is not so useful and V1 reader doesn't do that. Fix? Same if delta == 0
for(int i = 0; i < len; i++) {
literals[i] = val;
}
numLiterals = len;
}
@Override
public boolean hasNext() throws IOException {
return used != numLiterals || input.available() > 0;
}
@Override
public long next() throws IOException {
long result;
if (used == numLiterals) {
numLiterals = 0;
used = 0;
readValues(false);
}
result = literals[used++];
return result;
}
@Override
public void seek(PositionProvider index) throws IOException {
input.seek(index);
int consumed = (int) index.getNext();
if (consumed != 0) {
// a loop is required for cases where we break the run into two
// parts
while (consumed > 0) {
numLiterals = 0;
readValues(false);
used = consumed;
consumed -= numLiterals;
}
} else {
used = 0;
numLiterals = 0;
}
}
@Override
public void skip(long numValues) throws IOException {
while (numValues > 0) {
if (used == numLiterals) {
numLiterals = 0;
used = 0;
readValues(false);
}
long consume = Math.min(numValues, numLiterals - used);
used += consume;
numValues -= consume;
}
}
@Override
public void nextVector(ColumnVector previous,
long[] data,
int previousLen) throws IOException {
// if all nulls, just return
if (previous.isRepeating && !previous.noNulls && previous.isNull[0]) {
return;
}
previous.isRepeating = true;
for (int i = 0; i < previousLen; i++) {
if (previous.noNulls || !previous.isNull[i]) {
data[i] = next();
} else {
// The default value of null for int type in vectorized
// processing is 1, so set that if the value is null
data[i] = 1;
}
// The default value for nulls in Vectorization for int types is 1
// and given that non null value can also be 1, we need to check for isNull also
// when determining the isRepeating flag.
if (previous.isRepeating && i > 0 &&
(data[0] != data[i] || previous.isNull[0] != previous.isNull[i])) {
previous.isRepeating = false;
}
}
}
@Override
public void nextVector(ColumnVector vector, int[] data, int size) throws IOException {
final int batchSize = Math.min(data.length, size);
if (vector.noNulls) {
for (int r = 0; r < batchSize; ++r) {
data[r] = (int) next();
}
} else if (!(vector.isRepeating && vector.isNull[0])) {
for (int r = 0; r < batchSize; ++r) {
data[r] = (vector.isNull[r]) ? 1 : (int) next();
}
}
}
}
| 12,969 | 30.634146 | 91 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RunLengthIntegerWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.IOException;
import java.util.function.Consumer;
/**
* A streamFactory that writes a sequence of integers. A control byte is written before
 * each run with positive values 0 to 127 meaning 3 to 130 repetitions; each
* repetition is offset by a delta. If the control byte is -1 to -128, 1 to 128
* literal vint values follow.
*/
public class RunLengthIntegerWriter implements IntegerWriter {
static final int MIN_REPEAT_SIZE = 3;
static final int MAX_DELTA = 127;
static final int MIN_DELTA = -128;
static final int MAX_LITERAL_SIZE = 128;
private static final int MAX_REPEAT_SIZE = 127 + MIN_REPEAT_SIZE;
private final PositionedOutputStream output;
private final boolean signed;
private final long[] literals = new long[MAX_LITERAL_SIZE];
private int numLiterals = 0;
private long delta = 0;
private boolean repeat = false;
private int tailRunLength = 0;
private SerializationUtils utils;
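  // For example, writing 10, 12, 14, 16, 18 and flushing emits a single run:
  // control 0x02 (5 values - MIN_REPEAT_SIZE), the delta byte 0x02, and the base
  // value 10 as a vint.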
public RunLengthIntegerWriter(PositionedOutputStream output,
boolean signed) {
this.output = output;
this.signed = signed;
this.utils = new SerializationUtils();
}
private void writeValues() throws IOException {
if (numLiterals != 0) {
if (repeat) {
output.write(numLiterals - MIN_REPEAT_SIZE);
output.write((byte) delta);
if (signed) {
utils.writeVslong(output, literals[0]);
} else {
utils.writeVulong(output, literals[0]);
}
} else {
output.write(-numLiterals);
for(int i=0; i < numLiterals; ++i) {
if (signed) {
utils.writeVslong(output, literals[i]);
} else {
utils.writeVulong(output, literals[i]);
}
}
}
repeat = false;
numLiterals = 0;
tailRunLength = 0;
}
}
@Override
public void flush() throws IOException {
writeValues();
output.flush();
}
@Override
public void write(long value) throws IOException {
if (numLiterals == 0) {
literals[numLiterals++] = value;
tailRunLength = 1;
} else if (repeat) {
if (value == literals[0] + delta * numLiterals) {
numLiterals += 1;
if (numLiterals == MAX_REPEAT_SIZE) {
writeValues();
}
} else {
writeValues();
literals[numLiterals++] = value;
tailRunLength = 1;
}
} else {
if (tailRunLength == 1) {
delta = value - literals[numLiterals - 1];
if (delta < MIN_DELTA || delta > MAX_DELTA) {
tailRunLength = 1;
} else {
tailRunLength = 2;
}
} else if (value == literals[numLiterals - 1] + delta) {
tailRunLength += 1;
} else {
delta = value - literals[numLiterals - 1];
if (delta < MIN_DELTA || delta > MAX_DELTA) {
tailRunLength = 1;
} else {
tailRunLength = 2;
}
}
if (tailRunLength == MIN_REPEAT_SIZE) {
if (numLiterals + 1 == MIN_REPEAT_SIZE) {
repeat = true;
numLiterals += 1;
} else {
numLiterals -= MIN_REPEAT_SIZE - 1;
long base = literals[numLiterals];
writeValues();
literals[0] = base;
repeat = true;
numLiterals = MIN_REPEAT_SIZE;
}
} else {
literals[numLiterals++] = value;
if (numLiterals == MAX_LITERAL_SIZE) {
writeValues();
}
}
}
}
@Override
public void getPosition(PositionRecorder recorder) throws IOException {
output.getPosition(recorder);
recorder.addPosition(numLiterals);
}
@Override
public long estimateMemory() {
return output.getBufferSize();
}
@Override
public void changeIv(Consumer<byte[]> modifier) {
output.changeIv(modifier);
}
}
| 4,662 | 29.279221 | 87 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/RunLengthIntegerWriterV2.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.IOException;
import java.util.function.Consumer;
/**
* <p>A writer that performs light weight compression over sequence of integers.
* </p>
* <p>There are four types of lightweight integer compression</p>
* <ul>
* <li>SHORT_REPEAT</li>
* <li>DIRECT</li>
* <li>PATCHED_BASE</li>
* <li>DELTA</li>
* </ul>
* <p>The description and format for these types are as below:
* <b>SHORT_REPEAT:</b> Used for short repeated integer sequences.</p>
* <ul>
* <li>1 byte header
* <ul>
* <li>2 bits for encoding type</li>
* <li>3 bits for bytes required for repeating value</li>
* <li>3 bits for repeat count (MIN_REPEAT + run length)</li>
* </ul>
* </li>
* <li>Blob - repeat value (fixed bytes)</li>
* </ul>
* <p>
* <b>DIRECT:</b> Used for random integer sequences whose number of bit
* requirement doesn't vary a lot.</p>
* <ul>
* <li>2 byte header (1st byte)
* <ul>
* <li>2 bits for encoding type</li>
* <li>5 bits for fixed bit width of values in blob</li>
* <li>1 bit for storing MSB of run length</li>
* </ul></li>
* <li>2nd byte
* <ul>
* <li>8 bits for lower run length bits</li>
* </ul>
* </li>
* <li>Blob - stores the direct values using fixed bit width. The length of the
* data blob is (fixed width * run length) bits long</li>
* </ul>
* <p>
* <b>PATCHED_BASE:</b> Used for random integer sequences whose number of bit
* requirement varies beyond a threshold.</p>
* <ul>
* <li>4 bytes header (1st byte)
* <ul>
* <li>2 bits for encoding type</li>
* <li>5 bits for fixed bit width of values in blob</li>
* <li>1 bit for storing MSB of run length</li>
* </ul></li>
* <li>2nd byte
* <ul>
* <li>8 bits for lower run length bits</li>
* </ul></li>
* <li>3rd byte
* <ul>
* <li>3 bits for bytes required to encode base value</li>
* <li>5 bits for patch width</li>
* </ul></li>
* <li>4th byte
* <ul>
* <li>3 bits for patch gap width</li>
* <li>5 bits for patch length</li>
* </ul>
* </li>
* <li>Base value - Stored using fixed number of bytes. If MSB is set, base
* value is negative else positive. Length of base value is (base width * 8)
* bits.</li>
* <li>Data blob - Base reduced values as stored using fixed bit width. Length
* of data blob is (fixed width * run length) bits.</li>
* <li>Patch blob - Patch blob is a list of gap and patch value. Each entry in
* the patch list is (patch width + patch gap width) bits long. Gap between the
* subsequent elements to be patched are stored in upper part of entry whereas
* patch values are stored in lower part of entry. Length of patch blob is
* ((patch width + patch gap width) * patch length) bits.</li>
* </ul>
* <p>
* <b>DELTA</b> Used for monotonically increasing or decreasing sequences,
* sequences with fixed delta values or long repeated sequences.
* <ul>
* <li>2 bytes header (1st byte)
* <ul>
* <li>2 bits for encoding type</li>
* <li>5 bits for fixed bit width of values in blob</li>
* <li>1 bit for storing MSB of run length</li>
* </ul></li>
* <li>2nd byte
* <ul>
* <li>8 bits for lower run length bits</li>
* </ul></li>
* <li>Base value - zigzag encoded value written as varint</li>
* <li>Delta base - zigzag encoded value written as varint</li>
* <li>Delta blob - only positive values. monotonicity and orderness are decided
* based on the sign of the base value and delta base</li>
* </ul>
*/
public class RunLengthIntegerWriterV2 implements IntegerWriter {
public enum EncodingType {
SHORT_REPEAT, DIRECT, PATCHED_BASE, DELTA
}
static final int MAX_SCOPE = 512;
static final int MIN_REPEAT = 3;
static final long BASE_VALUE_LIMIT = 1L << 56;
private static final int MAX_SHORT_REPEAT_LENGTH = 10;
private long prevDelta = 0;
private int fixedRunLength = 0;
private int variableRunLength = 0;
private final long[] literals = new long[MAX_SCOPE];
private final PositionedOutputStream output;
private final boolean signed;
private EncodingType encoding;
private int numLiterals;
private final long[] zigzagLiterals;
private final long[] baseRedLiterals = new long[MAX_SCOPE];
private final long[] adjDeltas = new long[MAX_SCOPE];
private long fixedDelta;
private int zzBits90p;
private int zzBits100p;
private int brBits95p;
private int brBits100p;
private int bitsDeltaMax;
private int patchWidth;
private int patchGapWidth;
private int patchLength;
private long[] gapVsPatchList;
private long min;
private boolean isFixedDelta;
private SerializationUtils utils;
private boolean alignedBitpacking;
RunLengthIntegerWriterV2(PositionedOutputStream output, boolean signed) {
this(output, signed, true);
}
public RunLengthIntegerWriterV2(PositionedOutputStream output, boolean signed,
boolean alignedBitpacking) {
this.output = output;
this.signed = signed;
this.zigzagLiterals = signed ? new long[MAX_SCOPE] : null;
this.alignedBitpacking = alignedBitpacking;
this.utils = new SerializationUtils();
clear();
}
private void writeValues() throws IOException {
if (numLiterals != 0) {
if (encoding.equals(EncodingType.SHORT_REPEAT)) {
writeShortRepeatValues();
} else if (encoding.equals(EncodingType.DIRECT)) {
writeDirectValues();
} else if (encoding.equals(EncodingType.PATCHED_BASE)) {
writePatchedBaseValues();
} else {
writeDeltaValues();
}
// clear all the variables
clear();
}
}
private void writeDeltaValues() throws IOException {
int len = 0;
int fb = bitsDeltaMax;
int efb = 0;
if (alignedBitpacking) {
fb = utils.getClosestAlignedFixedBits(fb);
}
if (isFixedDelta) {
// if fixed run length is greater than threshold then it will be fixed
// delta sequence with delta value 0 else fixed delta sequence with
// non-zero delta value
if (fixedRunLength > MIN_REPEAT) {
// ex. sequence: 2 2 2 2 2 2 2 2
len = fixedRunLength - 1;
fixedRunLength = 0;
} else {
// ex. sequence: 4 6 8 10 12 14 16
len = variableRunLength - 1;
variableRunLength = 0;
}
} else {
// fixed width 0 is used for long repeating values.
// sequences that require only 1 bit to encode will have an additional bit
if (fb == 1) {
fb = 2;
}
efb = utils.encodeBitWidth(fb);
efb = efb << 1;
len = variableRunLength - 1;
variableRunLength = 0;
}
// extract the 9th bit of run length
final int tailBits = (len & 0x100) >>> 8;
// create first byte of the header
final int headerFirstByte = getOpcode() | efb | tailBits;
// second byte of the header stores the remaining 8 bits of runlength
final int headerSecondByte = len & 0xff;
// write header
output.write(headerFirstByte);
output.write(headerSecondByte);
// store the first value from zigzag literal array
if (signed) {
utils.writeVslong(output, literals[0]);
} else {
utils.writeVulong(output, literals[0]);
}
if (isFixedDelta) {
// if delta is fixed then we don't need to store delta blob
utils.writeVslong(output, fixedDelta);
} else {
// store the first value as delta value using zigzag encoding
utils.writeVslong(output, adjDeltas[0]);
// adjacent delta values are bit packed. The length of adjDeltas array is
// always one less than the number of literals (delta difference for n
// elements is n-1). We have already written one element, write the
// remaining numLiterals - 2 elements here
utils.writeInts(adjDeltas, 1, numLiterals - 2, fb, output);
}
}
private void writePatchedBaseValues() throws IOException {
// NOTE: Aligned bit packing cannot be applied for PATCHED_BASE encoding
// because patch is applied to MSB bits. For example: If fixed bit width of
// base value is 7 bits and if patch is 3 bits, the actual value is
// constructed by shifting the patch to left by 7 positions.
// actual_value = patch << 7 | base_value
// So, if we align base_value then actual_value can not be reconstructed.
// write the number of fixed bits required in next 5 bits
final int fb = brBits95p;
final int efb = utils.encodeBitWidth(fb) << 1;
// adjust variable run length, they are one off
variableRunLength -= 1;
// extract the 9th bit of run length
final int tailBits = (variableRunLength & 0x100) >>> 8;
// create first byte of the header
final int headerFirstByte = getOpcode() | efb | tailBits;
// second byte of the header stores the remaining 8 bits of runlength
final int headerSecondByte = variableRunLength & 0xff;
// if the min value is negative toggle the sign
final boolean isNegative = min < 0;
if (isNegative) {
min = -min;
}
// find the number of bytes required for base and shift it by 5 bits
// to accommodate patch width. The additional bit is used to store the sign
// of the base value.
final int baseWidth = utils.findClosestNumBits(min) + 1;
final int baseBytes = baseWidth % 8 == 0 ? baseWidth / 8 : (baseWidth / 8) + 1;
final int bb = (baseBytes - 1) << 5;
// if the base value is negative then set MSB to 1
if (isNegative) {
min |= (1L << ((baseBytes * 8) - 1));
}
// third byte contains 3 bits for number of bytes occupied by base
// and 5 bits for patchWidth
final int headerThirdByte = bb | utils.encodeBitWidth(patchWidth);
// fourth byte contains 3 bits for page gap width and 5 bits for
// patch length
final int headerFourthByte = (patchGapWidth - 1) << 5 | patchLength;
// write header
output.write(headerFirstByte);
output.write(headerSecondByte);
output.write(headerThirdByte);
output.write(headerFourthByte);
// write the base value using fixed bytes in big endian order
for(int i = baseBytes - 1; i >= 0; i--) {
byte b = (byte) ((min >>> (i * 8)) & 0xff);
output.write(b);
}
// base reduced literals are bit packed
int closestFixedBits = utils.getClosestFixedBits(fb);
utils.writeInts(baseRedLiterals, 0, numLiterals, closestFixedBits,
output);
// write patch list
closestFixedBits = utils.getClosestFixedBits(patchGapWidth + patchWidth);
utils.writeInts(gapVsPatchList, 0, gapVsPatchList.length, closestFixedBits,
output);
// reset run length
variableRunLength = 0;
}
/**
* Store the opcode in 2 MSB bits
* @return opcode
*/
private int getOpcode() {
return encoding.ordinal() << 6;
}
private void writeDirectValues() throws IOException {
// write the number of fixed bits required in next 5 bits
int fb = zzBits100p;
if (alignedBitpacking) {
fb = utils.getClosestAlignedFixedBits(fb);
}
final int efb = utils.encodeBitWidth(fb) << 1;
// adjust variable run length
variableRunLength -= 1;
// extract the 9th bit of run length
final int tailBits = (variableRunLength & 0x100) >>> 8;
// create first byte of the header
final int headerFirstByte = getOpcode() | efb | tailBits;
// second byte of the header stores the remaining 8 bits of runlength
final int headerSecondByte = variableRunLength & 0xff;
// write header
output.write(headerFirstByte);
output.write(headerSecondByte);
// bit packing the zigzag encoded literals
long[] currentZigzagLiterals = signed ? zigzagLiterals : literals;
utils.writeInts(currentZigzagLiterals, 0, numLiterals, fb, output);
// reset run length
variableRunLength = 0;
}
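  // For example (unsigned stream), a run of five copies of the value 10 is written by
  // writeShortRepeatValues() as the single header byte 0x02 (opcode 00, value width of
  // one byte encoded as 0, run length 5 - MIN_REPEAT) followed by the value byte 0x0A.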
private void writeShortRepeatValues() throws IOException {
// get the value that is repeating, compute the bits and bytes required
long repeatVal = 0;
if (signed) {
repeatVal = utils.zigzagEncode(literals[0]);
} else {
repeatVal = literals[0];
}
final int numBitsRepeatVal = utils.findClosestNumBits(repeatVal);
final int numBytesRepeatVal = numBitsRepeatVal % 8 == 0 ? numBitsRepeatVal >>> 3
: (numBitsRepeatVal >>> 3) + 1;
// write encoding type in top 2 bits
int header = getOpcode();
// write the number of bytes required for the value
header |= ((numBytesRepeatVal - 1) << 3);
// write the run length
fixedRunLength -= MIN_REPEAT;
header |= fixedRunLength;
// write the header
output.write(header);
// write the repeating value in big endian byte order
for(int i = numBytesRepeatVal - 1; i >= 0; i--) {
int b = (int) ((repeatVal >>> (i * 8)) & 0xff);
output.write(b);
}
fixedRunLength = 0;
}
/**
* Prepare for Direct or PatchedBase encoding
* compute zigZagLiterals and zzBits100p (Max number of encoding bits required)
* @return zigzagLiterals
*/
private long[] prepareForDirectOrPatchedBase() {
// only signed numbers need to compute zigzag values
if (signed) {
computeZigZagLiterals();
}
long[] currentZigzagLiterals = signed ? zigzagLiterals : literals;
zzBits100p = utils.percentileBits(currentZigzagLiterals, 0, numLiterals, 1.0);
return currentZigzagLiterals;
}
private void determineEncoding() {
// we need to compute zigzag values for DIRECT encoding if we decide to
// break early for delta overflows or for shorter runs
// not a big win for shorter runs to determine encoding
if (numLiterals <= MIN_REPEAT) {
prepareForDirectOrPatchedBase();
encoding = EncodingType.DIRECT;
return;
}
// DELTA encoding check
// for identifying monotonic sequences
boolean isIncreasing = true;
boolean isDecreasing = true;
this.isFixedDelta = true;
this.min = literals[0];
long max = literals[0];
final long initialDelta = literals[1] - literals[0];
long currDelta = 0;
long deltaMax = 0;
this.adjDeltas[0] = initialDelta;
for (int i = 1; i < numLiterals; i++) {
final long l1 = literals[i];
final long l0 = literals[i - 1];
currDelta = l1 - l0;
min = Math.min(min, l1);
max = Math.max(max, l1);
isIncreasing &= (l0 <= l1);
isDecreasing &= (l0 >= l1);
isFixedDelta &= (currDelta == initialDelta);
if (i > 1) {
adjDeltas[i - 1] = Math.abs(currDelta);
deltaMax = Math.max(deltaMax, adjDeltas[i - 1]);
}
}
// its faster to exit under delta overflow condition without checking for
// PATCHED_BASE condition as encoding using DIRECT is faster and has less
// overhead than PATCHED_BASE
if (!utils.isSafeSubtract(max, min)) {
prepareForDirectOrPatchedBase();
encoding = EncodingType.DIRECT;
return;
}
// invariant - subtracting any number from any other in the literals after
// this point won't overflow
// if min is equal to max then the delta is 0, this condition happens for
// fixed values run >10 which cannot be encoded with SHORT_REPEAT
if (min == max) {
assert isFixedDelta : min + "==" + max +
", isFixedDelta cannot be false";
assert currDelta == 0 : min + "==" + max + ", currDelta should be zero";
fixedDelta = 0;
encoding = EncodingType.DELTA;
return;
}
if (isFixedDelta) {
assert currDelta == initialDelta
: "currDelta should be equal to initialDelta for fixed delta encoding";
encoding = EncodingType.DELTA;
fixedDelta = currDelta;
return;
}
// if initialDelta is 0 then we cannot delta encode as we cannot identify
// the sign of deltas (increasing or decreasing)
if (initialDelta != 0) {
// stores the number of bits required for packing delta blob in
// delta encoding
bitsDeltaMax = utils.findClosestNumBits(deltaMax);
// monotonic condition
if (isIncreasing || isDecreasing) {
encoding = EncodingType.DELTA;
return;
}
}
// PATCHED_BASE encoding check
// percentile values are computed for the zigzag encoded values. if the
// number of bit requirement between 90th and 100th percentile varies
// beyond a threshold then we need to patch the values. if the variation
// is not significant then we can use direct encoding
long[] currentZigzagLiterals = prepareForDirectOrPatchedBase();
zzBits90p = utils.percentileBits(currentZigzagLiterals, 0, numLiterals, 0.9);
int diffBitsLH = zzBits100p - zzBits90p;
// if the difference between 90th percentile and 100th percentile fixed
// bits is > 1 then we need patch the values
if (diffBitsLH > 1) {
// patching is done only on base reduced values.
// remove base from literals
for (int i = 0; i < numLiterals; i++) {
baseRedLiterals[i] = literals[i] - min;
}
// 95th percentile width is used to determine max allowed value
// after which patching will be done
brBits95p = utils.percentileBits(baseRedLiterals, 0, numLiterals, 0.95);
// 100th percentile is used to compute the max patch width
brBits100p = utils.percentileBits(baseRedLiterals, 0, numLiterals, 1.0);
// after base reducing the values, if the difference in bits between
// 95th percentile and 100th percentile value is zero then there
// is no point in patching the values, in which case we will
// fallback to DIRECT encoding.
// The decision to use patched base was based on zigzag values, but the
// actual patching is done on base reduced literals.
if ((brBits100p - brBits95p) != 0 && Math.abs(min) < BASE_VALUE_LIMIT) {
encoding = EncodingType.PATCHED_BASE;
preparePatchedBlob();
} else {
encoding = EncodingType.DIRECT;
}
} else {
// if difference in bits between 95th percentile and 100th percentile is
// 0, then patch length will become 0. Hence we will fallback to direct
encoding = EncodingType.DIRECT;
}
}
private void computeZigZagLiterals() {
// populate zigzag encoded literals
assert signed : "only signed numbers need to compute zigzag values";
for (int i = 0; i < numLiterals; i++) {
zigzagLiterals[i] = utils.zigzagEncode(literals[i]);
}
}
private void preparePatchedBlob() {
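    // Sketch of the idea: values are base-reduced (min subtracted) and packed
    // at the 95th percentile bit width; the few values that need more bits
    // keep only their low brBits95p bits, and their high-order bits are stored
    // separately as (gap, patch) pairs in gapVsPatchList.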
// mask will be max value beyond which patch will be generated
long mask = (1L << brBits95p) - 1;
    // since we are considering only the 95th percentile, the gap and
    // patch arrays need to hold at most 5% of the values
patchLength = (int) Math.ceil((numLiterals * 0.05));
int[] gapList = new int[patchLength];
long[] patchList = new long[patchLength];
// #bit for patch
patchWidth = brBits100p - brBits95p;
patchWidth = utils.getClosestFixedBits(patchWidth);
    // if patch bit requirement is 64 then it will not be possible to pack
// gap and patch together in a long. To make sure gap and patch can be
// packed together adjust the patch width
if (patchWidth == 64) {
patchWidth = 56;
brBits95p = 8;
mask = (1L << brBits95p) - 1;
}
int gapIdx = 0;
int patchIdx = 0;
int prev = 0;
int gap = 0;
int maxGap = 0;
for(int i = 0; i < numLiterals; i++) {
// if value is above mask then create the patch and record the gap
if (baseRedLiterals[i] > mask) {
gap = i - prev;
if (gap > maxGap) {
maxGap = gap;
}
// gaps are relative, so store the previous patched value index
prev = i;
gapList[gapIdx++] = gap;
// extract the most significant bits that are over mask bits
long patch = baseRedLiterals[i] >>> brBits95p;
patchList[patchIdx++] = patch;
// strip off the MSB to enable safe bit packing
baseRedLiterals[i] &= mask;
}
}
// adjust the patch length to number of entries in gap list
patchLength = gapIdx;
// if the element to be patched is the first and only element then
    // max gap will be 0, but to store the gap as 0 we need at least 1 bit
if (maxGap == 0 && patchLength != 0) {
patchGapWidth = 1;
} else {
patchGapWidth = utils.findClosestNumBits(maxGap);
}
    // special case: if the maximum gap is 256 or more, then the gap needs
    // 9 bits to encode. But we only have 3 bits in the header to record
    // the gap width (at most 8 bits). To deal with this case, we will save
    // two entries in the patch list in the following way
    // 255 gap => 0 for patch value
    // actual gap - 255 => actual patch value
    // We will do the same when the gap is 511. If the element to be patched
    // is the last element in the scope then the gap will be 511. In this case
    // we will have 3 entries in the patch list in the following way
    // 255 gap => 0 for patch value
    // 255 gap => 0 for patch value
    // 1 gap => actual patch value
if (patchGapWidth > 8) {
patchGapWidth = 8;
// for gap = 511, we need two additional entries in patch list
if (maxGap == 511) {
patchLength += 2;
} else {
patchLength += 1;
}
}
// create gap vs patch list
gapIdx = 0;
patchIdx = 0;
gapVsPatchList = new long[patchLength];
for(int i = 0; i < patchLength; i++) {
long g = gapList[gapIdx++];
long p = patchList[patchIdx++];
while (g > 255) {
gapVsPatchList[i++] = (255L << patchWidth);
g -= 255;
}
// store patch value in LSBs and gap in MSBs
gapVsPatchList[i] = (g << patchWidth) | p;
}
}
/**
* clears all the variables
*/
private void clear() {
numLiterals = 0;
encoding = null;
prevDelta = 0;
fixedDelta = 0;
zzBits90p = 0;
zzBits100p = 0;
brBits95p = 0;
brBits100p = 0;
bitsDeltaMax = 0;
patchGapWidth = 0;
patchLength = 0;
patchWidth = 0;
gapVsPatchList = null;
min = 0;
isFixedDelta = true;
}
@Override
public void flush() throws IOException {
if (numLiterals != 0) {
if (variableRunLength != 0) {
determineEncoding();
writeValues();
} else if (fixedRunLength != 0) {
if (fixedRunLength < MIN_REPEAT) {
variableRunLength = fixedRunLength;
fixedRunLength = 0;
determineEncoding();
} else if (fixedRunLength <= MAX_SHORT_REPEAT_LENGTH) {
encoding = EncodingType.SHORT_REPEAT;
} else {
encoding = EncodingType.DELTA;
isFixedDelta = true;
}
writeValues();
}
}
output.flush();
}
@Override
public void write(long val) throws IOException {
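    // State machine summary, restating the branches below: fixedRunLength
    // counts trailing repeated values and variableRunLength counts a
    // non-repeating run. Once a repeat of at least MIN_REPEAT values is seen,
    // any pending variable run is flushed; runs are also flushed when they
    // reach MAX_SCOPE values.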
if (numLiterals == 0) {
initializeLiterals(val);
} else {
if (numLiterals == 1) {
prevDelta = val - literals[0];
literals[numLiterals++] = val;
// if both values are same count as fixed run else variable run
if (val == literals[0]) {
fixedRunLength = 2;
variableRunLength = 0;
} else {
fixedRunLength = 0;
variableRunLength = 2;
}
} else {
long currentDelta = val - literals[numLiterals - 1];
if (prevDelta == 0 && currentDelta == 0) {
// fixed delta run
literals[numLiterals++] = val;
// if variable run is non-zero then we are seeing repeating
// values at the end of variable run in which case keep
// updating variable and fixed runs
if (variableRunLength > 0) {
fixedRunLength = 2;
}
fixedRunLength += 1;
// if fixed run met the minimum condition and if variable
// run is non-zero then flush the variable run and shift the
// tail fixed runs to start of the buffer
if (fixedRunLength >= MIN_REPEAT && variableRunLength > 0) {
numLiterals -= MIN_REPEAT;
variableRunLength -= MIN_REPEAT - 1;
// copy the tail fixed runs
long[] tailVals = new long[MIN_REPEAT];
System.arraycopy(literals, numLiterals, tailVals, 0, MIN_REPEAT);
// determine variable encoding and flush values
determineEncoding();
writeValues();
// shift tail fixed runs to beginning of the buffer
for(long l : tailVals) {
literals[numLiterals++] = l;
}
}
// if fixed runs reached max repeat length then write values
if (fixedRunLength == MAX_SCOPE) {
encoding = EncodingType.DELTA;
isFixedDelta = true;
writeValues();
}
} else {
// variable delta run
// if fixed run length is non-zero and if it satisfies the
// short repeat conditions then write the values as short repeats
// else use delta encoding
if (fixedRunLength >= MIN_REPEAT) {
if (fixedRunLength <= MAX_SHORT_REPEAT_LENGTH) {
encoding = EncodingType.SHORT_REPEAT;
} else {
encoding = EncodingType.DELTA;
isFixedDelta = true;
}
writeValues();
}
// if fixed run length is <MIN_REPEAT and current value is
// different from previous then treat it as variable run
if (fixedRunLength > 0 && fixedRunLength < MIN_REPEAT) {
if (val != literals[numLiterals - 1]) {
variableRunLength = fixedRunLength;
fixedRunLength = 0;
}
}
// after writing values re-initialize the variables
if (numLiterals == 0) {
initializeLiterals(val);
} else {
// keep updating variable run lengths
prevDelta = val - literals[numLiterals - 1];
literals[numLiterals++] = val;
variableRunLength += 1;
// if variable run length reach the max scope, write it
if (variableRunLength == MAX_SCOPE) {
determineEncoding();
writeValues();
}
}
}
}
}
}
private void initializeLiterals(long val) {
literals[numLiterals++] = val;
fixedRunLength = 1;
variableRunLength = 1;
}
@Override
public void getPosition(PositionRecorder recorder) throws IOException {
output.getPosition(recorder);
recorder.addPosition(numLiterals);
}
@Override
public long estimateMemory() {
return output.getBufferSize();
}
@Override
public void changeIv(Consumer<byte[]> modifier) {
output.changeIv(modifier);
}
}
| 27,449 | 31.756563 | 84 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/SchemaEvolution.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
/**
* Infer and track the evolution between the schema as stored in the file and
* the schema that has been requested by the reader.
*/
public class SchemaEvolution {
// indexed by reader column id
private final TypeDescription[] readerFileTypes;
// indexed by reader column id
private final boolean[] readerIncluded;
// the offset to the first column id ignoring any ACID columns
private final int readerColumnOffset;
// indexed by file column id
private final boolean[] fileIncluded;
private final TypeDescription fileSchema;
private final TypeDescription readerSchema;
private boolean hasConversion;
private boolean isOnlyImplicitConversion;
private final boolean isAcid;
final boolean isSchemaEvolutionCaseAware;
/**
   * {@code true} if acid metadata columns should be decoded; otherwise they will
* be set to {@code null}. {@link #acidEventFieldNames}.
*/
private final boolean includeAcidColumns;
// indexed by file column id
private final boolean[] ppdSafeConversion;
  // columns are matched by index, not by name, between the reader & file schema
private final boolean positionalColumns;
private static final Logger LOG =
LoggerFactory.getLogger(SchemaEvolution.class);
private static final Pattern missingMetadataPattern =
Pattern.compile("_col\\d+");
public static class IllegalEvolutionException extends RuntimeException {
public IllegalEvolutionException(String msg) {
super(msg);
}
}
public SchemaEvolution(TypeDescription fileSchema,
TypeDescription readerSchema,
Reader.Options options) {
boolean allowMissingMetadata = options.getTolerateMissingSchema();
boolean[] includedCols = options.getInclude();
    this.isSchemaEvolutionCaseAware = options.getIsSchemaEvolutionCaseAware();
this.readerIncluded = includedCols == null ? null :
Arrays.copyOf(includedCols, includedCols.length);
this.fileIncluded = new boolean[fileSchema.getMaximumId() + 1];
this.hasConversion = false;
this.isOnlyImplicitConversion = true;
this.fileSchema = fileSchema;
// Use file schema when reader schema not provided
readerSchema = readerSchema == null ? this.fileSchema : readerSchema;
this.isAcid = checkAcidSchema(fileSchema);
boolean readerSchemaIsAcid = checkAcidSchema(readerSchema);
this.includeAcidColumns = options.getIncludeAcidColumns();
this.readerColumnOffset = isAcid && !readerSchemaIsAcid ? acidEventFieldNames.size() : 0;
// Create type conversion using reader schema
if (isAcid && !readerSchemaIsAcid) {
this.readerSchema = createEventSchema(readerSchema);
} else {
this.readerSchema = readerSchema;
}
if (readerIncluded != null &&
readerIncluded.length + readerColumnOffset !=
this.readerSchema.getMaximumId() + 1) {
throw new IllegalArgumentException("Include vector the wrong length: "
+ this.readerSchema.toJson() + " with include length "
+ readerIncluded.length);
}
this.readerFileTypes =
new TypeDescription[this.readerSchema.getMaximumId() + 1];
int positionalLevels = 0;
if (options.getForcePositionalEvolution()) {
positionalLevels = isAcid ? 2 : options.getPositionalEvolutionLevel();
} else if (!hasColumnNames(isAcid? getBaseRow(fileSchema) : fileSchema)) {
if (!this.fileSchema.equals(this.readerSchema)) {
if (!allowMissingMetadata) {
throw new RuntimeException("Found that schema metadata is missing"
+ " from file. This is likely caused by"
+ " a writer earlier than HIVE-4243. Will"
+ " not try to reconcile schemas");
} else {
LOG.warn("Column names are missing from this file. This is"
+ " caused by a writer earlier than HIVE-4243. The reader will"
+ " reconcile schemas based on index. File type: " +
this.fileSchema + ", reader type: " + this.readerSchema);
positionalLevels = isAcid ? 2 : options.getPositionalEvolutionLevel();
}
}
}
buildConversion(fileSchema, this.readerSchema, positionalLevels);
this.positionalColumns = options.getForcePositionalEvolution();
this.ppdSafeConversion = populatePpdSafeConversion();
}
@Deprecated
public SchemaEvolution(TypeDescription fileSchema, boolean[] readerIncluded) {
this(fileSchema, null, readerIncluded);
}
@Deprecated
public SchemaEvolution(TypeDescription fileSchema,
TypeDescription readerSchema,
boolean[] readerIncluded) {
this(fileSchema, readerSchema,
new Reader.Options(new Configuration())
.include(readerIncluded));
}
  // Return true if the schema is not a struct, or if at least one field has
  // a real name (i.e. not of the pre-HIVE-4243 form _col[0-9]+)
private boolean hasColumnNames(TypeDescription fileSchema) {
if (fileSchema.getCategory() != TypeDescription.Category.STRUCT) {
return true;
}
for (String fieldName : fileSchema.getFieldNames()) {
if (!missingMetadataPattern.matcher(fieldName).matches()) {
return true;
}
}
return false;
}
public boolean isSchemaEvolutionCaseAware() {
return isSchemaEvolutionCaseAware;
}
public TypeDescription getReaderSchema() {
return readerSchema;
}
/**
* Returns the non-ACID (aka base) reader type description.
*
* @return the reader type ignoring the ACID rowid columns, if any
*/
public TypeDescription getReaderBaseSchema() {
return isAcid ? getBaseRow(readerSchema) : readerSchema;
}
/**
* Does the file include ACID columns?
* @return is this an ACID file?
*/
boolean isAcid() {
return isAcid;
}
/**
* Is there Schema Evolution data type conversion?
* @return
*/
public boolean hasConversion() {
return hasConversion;
}
/**
   * When there is Schema Evolution data type conversion, i.e. hasConversion()
   * returns true, is the conversion only the implicit kind?
   *
   * (see typesAreImplicitConversion).
   * @return true if every conversion is an implicit widening conversion
*/
public boolean isOnlyImplicitConversion() {
return isOnlyImplicitConversion;
}
public TypeDescription getFileSchema() {
return fileSchema;
}
public TypeDescription getFileType(TypeDescription readerType) {
return getFileType(readerType.getId());
}
/**
* Get the file type by reader type id.
* @param id reader column id
* @return
*/
public TypeDescription getFileType(int id) {
return readerFileTypes[id];
}
/**
* Get whether each column is included from the reader's point of view.
* @return a boolean array indexed by reader column id
*/
public boolean[] getReaderIncluded() {
return readerIncluded;
}
/**
* Get whether each column is included from the file's point of view.
* @return a boolean array indexed by file column id
*/
public boolean[] getFileIncluded() {
return fileIncluded;
}
/**
* Get whether the columns are handled via position or name
*/
public boolean getPositionalColumns() {
return this.positionalColumns;
}
/**
* Determine if there is implicit conversion from a file to reader type.
*
* Implicit conversions are:
* Small to larger integer (e.g. INT to LONG)
* FLOAT to DOUBLE
* Some String Family conversions.
*
* NOTE: This check is independent of the PPD conversion checks.
* @return
*/
private boolean typesAreImplicitConversion(final TypeDescription fileType,
final TypeDescription readerType) {
switch (fileType.getCategory()) {
case BYTE:
if (readerType.getCategory().equals(TypeDescription.Category.SHORT) ||
readerType.getCategory().equals(TypeDescription.Category.INT) ||
readerType.getCategory().equals(TypeDescription.Category.LONG)) {
return true;
}
break;
case SHORT:
if (readerType.getCategory().equals(TypeDescription.Category.INT) ||
readerType.getCategory().equals(TypeDescription.Category.LONG)) {
return true;
}
break;
case INT:
if (readerType.getCategory().equals(TypeDescription.Category.LONG)) {
return true;
}
break;
case FLOAT:
if (readerType.getCategory().equals(TypeDescription.Category.DOUBLE)) {
return true;
}
break;
case CHAR:
case VARCHAR:
if (readerType.getCategory().equals(TypeDescription.Category.STRING)) {
return true;
}
if (readerType.getCategory().equals(TypeDescription.Category.CHAR) ||
readerType.getCategory().equals(TypeDescription.Category.VARCHAR)) {
return (fileType.getMaxLength() <= readerType.getMaxLength());
}
break;
default:
break;
}
return false;
}
/**
* Check if column is safe for ppd evaluation
* @param fileColId file column id
* @return true if the specified column is safe for ppd evaluation else false
*/
public boolean isPPDSafeConversion(final int fileColId) {
if (hasConversion()) {
return !(fileColId < 0 || fileColId >= ppdSafeConversion.length) &&
ppdSafeConversion[fileColId];
}
// when there is no schema evolution PPD is safe
return true;
}
private boolean[] populatePpdSafeConversion() {
if (fileSchema == null || readerSchema == null || readerFileTypes == null) {
return null;
}
boolean[] result = new boolean[fileSchema.getMaximumId() + 1];
boolean safePpd = validatePPDConversion(fileSchema, readerSchema);
result[fileSchema.getId()] = safePpd;
return populatePpdSafeConversionForChildren(result,
readerSchema.getChildren());
}
/**
* Recursion to check the conversion of nested field.
*
* @param ppdSafeConversion boolean array to specify which column are safe.
* @param children reader schema children.
*
* @return boolean array to represent list of column safe or not.
*/
private boolean[] populatePpdSafeConversionForChildren(
boolean[] ppdSafeConversion,
List<TypeDescription> children) {
boolean safePpd;
if (children != null) {
for (TypeDescription child : children) {
TypeDescription fileType = getFileType(child.getId());
safePpd = validatePPDConversion(fileType, child);
if (fileType != null) {
ppdSafeConversion[fileType.getId()] = safePpd;
}
populatePpdSafeConversionForChildren(ppdSafeConversion,
child.getChildren());
}
}
return ppdSafeConversion;
}
private boolean validatePPDConversion(final TypeDescription fileType,
final TypeDescription readerType) {
if (fileType == null) {
return false;
}
if (fileType.getCategory().isPrimitive()) {
if (fileType.getCategory().equals(readerType.getCategory())) {
// for decimals alone do equality check to not mess up with precision change
return !(fileType.getCategory() == TypeDescription.Category.DECIMAL &&
!fileType.equals(readerType));
}
// only integer and string evolutions are safe
// byte -> short -> int -> long
// string <-> char <-> varchar
// NOTE: Float to double evolution is not safe as floats are stored as doubles in ORC's
// internal index, but when doing predicate evaluation for queries like "select * from
// orc_float where f = 74.72" the constant on the filter is converted from string -> double
// so the precisions will be different and the comparison will fail.
// Soon, we should convert all sargs that compare equality between floats or
// doubles to range predicates.
// Similarly string -> char and varchar -> char and vice versa is not possible, as ORC stores
// char with padded spaces in its internal index.
switch (fileType.getCategory()) {
case BYTE:
if (readerType.getCategory().equals(TypeDescription.Category.SHORT) ||
readerType.getCategory().equals(TypeDescription.Category.INT) ||
readerType.getCategory().equals(TypeDescription.Category.LONG)) {
return true;
}
break;
case SHORT:
if (readerType.getCategory().equals(TypeDescription.Category.INT) ||
readerType.getCategory().equals(TypeDescription.Category.LONG)) {
return true;
}
break;
case INT:
if (readerType.getCategory().equals(TypeDescription.Category.LONG)) {
return true;
}
break;
case STRING:
if (readerType.getCategory().equals(TypeDescription.Category.VARCHAR)) {
return true;
}
break;
case VARCHAR:
if (readerType.getCategory().equals(TypeDescription.Category.STRING)) {
return true;
}
break;
default:
break;
}
}
return false;
}
/**
* Should we read the given reader column?
* @param readerId the id of column in the extended reader schema
* @return true if the column should be read
*/
public boolean includeReaderColumn(int readerId) {
if(readerId == 0) {
//always want top level struct - everything is its child
return true;
}
if(isAcid) {
if(readerId < readerColumnOffset) {
return includeAcidColumns;
}
return readerIncluded == null ||
readerIncluded[readerId - readerColumnOffset];
}
return readerIncluded == null || readerIncluded[readerId];
}
/**
* Build the mapping from the file type to the reader type. For pre-HIVE-4243
* ORC files, the top level structure is matched using position within the
* row. Otherwise, structs fields are matched by name.
* @param fileType the type in the file
* @param readerType the type in the reader
* @param positionalLevels the number of structure levels that must be
* mapped by position rather than field name. Pre
* HIVE-4243 files have either 1 or 2 levels matched
* positionally depending on whether they are ACID.
*/
void buildConversion(TypeDescription fileType,
TypeDescription readerType,
int positionalLevels) {
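    // Example of positionalLevels, based on how the constructor sets it: for a
    // pre-HIVE-4243 ACID file it is 2, so the outer event struct and the row
    // struct are matched by position and anything deeper is matched by name.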
// if the column isn't included, don't map it
if (!includeReaderColumn(readerType.getId())) {
return;
}
boolean isOk = true;
// check the easy case first
if (fileType.getCategory() == readerType.getCategory()) {
switch (readerType.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
case DOUBLE:
case FLOAT:
case STRING:
case TIMESTAMP:
case TIMESTAMP_INSTANT:
case BINARY:
case DATE:
// these are always a match
break;
case CHAR:
case VARCHAR:
// We do conversion when same CHAR/VARCHAR type but different
// maxLength.
if (fileType.getMaxLength() != readerType.getMaxLength()) {
hasConversion = true;
if (!typesAreImplicitConversion(fileType, readerType)) {
isOnlyImplicitConversion = false;
}
}
break;
case DECIMAL:
// We do conversion when same DECIMAL type but different
// precision/scale.
if (fileType.getPrecision() != readerType.getPrecision() ||
fileType.getScale() != readerType.getScale()) {
hasConversion = true;
isOnlyImplicitConversion = false;
}
break;
case UNION:
case MAP:
case LIST: {
// these must be an exact match
List<TypeDescription> fileChildren = fileType.getChildren();
List<TypeDescription> readerChildren = readerType.getChildren();
if (fileChildren.size() == readerChildren.size()) {
for(int i=0; i < fileChildren.size(); ++i) {
buildConversion(fileChildren.get(i),
readerChildren.get(i), positionalLevels - 1);
}
} else {
isOk = false;
}
break;
}
case STRUCT: {
List<TypeDescription> readerChildren = readerType.getChildren();
List<TypeDescription> fileChildren = fileType.getChildren();
if (fileChildren.size() != readerChildren.size()) {
hasConversion = true;
// UNDONE: Does LLAP detect fewer columns and NULL them out????
isOnlyImplicitConversion = false;
}
if (positionalLevels <= 0) {
List<String> readerFieldNames = readerType.getFieldNames();
List<String> fileFieldNames = fileType.getFieldNames();
final Map<String, TypeDescription> fileTypesIdx;
if (isSchemaEvolutionCaseAware) {
fileTypesIdx = new HashMap<>();
} else {
fileTypesIdx = new CaseInsensitiveMap<TypeDescription>();
}
for (int i = 0; i < fileFieldNames.size(); i++) {
final String fileFieldName = fileFieldNames.get(i);
fileTypesIdx.put(fileFieldName, fileChildren.get(i));
}
for (int i = 0; i < readerFieldNames.size(); i++) {
final String readerFieldName = readerFieldNames.get(i);
TypeDescription readerField = readerChildren.get(i);
TypeDescription fileField = fileTypesIdx.get(readerFieldName);
if (fileField == null) {
continue;
}
buildConversion(fileField, readerField, 0);
}
} else {
int jointSize = Math.min(fileChildren.size(),
readerChildren.size());
for (int i = 0; i < jointSize; ++i) {
buildConversion(fileChildren.get(i), readerChildren.get(i),
positionalLevels - 1);
}
}
break;
}
default:
throw new IllegalArgumentException("Unknown type " + readerType);
}
} else {
/*
       * Check for the few cases where we will not convert....
*/
isOk = ConvertTreeReaderFactory.canConvert(fileType, readerType);
hasConversion = true;
if (!typesAreImplicitConversion(fileType, readerType)) {
isOnlyImplicitConversion = false;
}
}
if (isOk) {
readerFileTypes[readerType.getId()] = fileType;
fileIncluded[fileType.getId()] = true;
} else {
throw new IllegalEvolutionException(
String.format("ORC does not support type conversion from file" +
" type %s (%d) to reader type %s (%d)",
fileType, fileType.getId(),
readerType, readerType.getId()));
}
}
public static boolean checkAcidSchema(TypeDescription type) {
if (type.getCategory().equals(TypeDescription.Category.STRUCT)) {
List<String> rootFields = type.getFieldNames();
if (rootFields.size() != acidEventFieldNames.size()) {
return false;
}
for (int i = 0; i < rootFields.size(); i++) {
if (!acidEventFieldNames.get(i).equalsIgnoreCase(rootFields.get(i))) {
return false;
}
}
return true;
}
return false;
}
/**
* @param typeDescr
* @return ORC types for the ACID event based on the row's type description
*/
public static TypeDescription createEventSchema(TypeDescription typeDescr) {
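    // The resulting schema is (roughly, in TypeDescription's string form):
    // struct<operation:int,originalTransaction:bigint,bucket:int,
    //        rowId:bigint,currentTransaction:bigint,row:<typeDescr>>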
TypeDescription result = TypeDescription.createStruct()
.addField("operation", TypeDescription.createInt())
.addField("originalTransaction", TypeDescription.createLong())
.addField("bucket", TypeDescription.createInt())
.addField("rowId", TypeDescription.createLong())
.addField("currentTransaction", TypeDescription.createLong())
.addField("row", typeDescr.clone());
return result;
}
/**
* Get the underlying base row from an ACID event struct.
* @param typeDescription the ACID event schema.
* @return the subtype for the real row
*/
public static TypeDescription getBaseRow(TypeDescription typeDescription) {
final int ACID_ROW_OFFSET = 5;
return typeDescription.getChildren().get(ACID_ROW_OFFSET);
}
  private static final List<String> acidEventFieldNames =
new ArrayList<String>();
static {
acidEventFieldNames.add("operation");
acidEventFieldNames.add("originalTransaction");
acidEventFieldNames.add("bucket");
acidEventFieldNames.add("rowId");
acidEventFieldNames.add("currentTransaction");
acidEventFieldNames.add("row");
}
private static class CaseInsensitiveMap<V> extends HashMap<String,V> {
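    // Keys are lower-cased on both put and get, so field-name lookups ignore
    // case; buildConversion uses this map when isSchemaEvolutionCaseAware is
    // false.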
@Override
public V put(String key, V value) {
return super.put(key.toLowerCase(), value);
}
@Override
public V get(Object key) {
return this.get((String) key);
}
// not @Override as key to be of type Object
public V get(String key) {
return super.get(key.toLowerCase());
}
}
}
| 22,452 | 33.919129 | 99 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/SerializationUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.orc.CompressionCodec;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.impl.writer.StreamOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.util.Arrays;
import java.util.TimeZone;
public final class SerializationUtils {
private static final Logger LOG = LoggerFactory.getLogger(SerializationUtils.class);
private static final int BUFFER_SIZE = 64;
private final byte[] readBuffer;
private final byte[] writeBuffer;
/**
   * Buffer for a histogram that stores, for each encoded bit width, how many
   * values require that width. The maximum number of distinct widths that can
   * be encoded is the number of values in FixedBitSizes.
*
* @see FixedBitSizes
*/
private final int[] histBuffer = new int[32];
public SerializationUtils() {
this.readBuffer = new byte[BUFFER_SIZE];
this.writeBuffer = new byte[BUFFER_SIZE];
}
public void writeVulong(OutputStream output,
long value) throws IOException {
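    // Variable-length encoding sketch: 7 data bits per byte, least significant
    // group first, high bit set on every byte except the last. For example,
    // 127 is written as 0x7F, 128 as 0x80 0x01, and 300 as 0xAC 0x02.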
int posn = 0;
while (true) {
if ((value & ~0x7f) == 0) {
writeBuffer[posn++] = (byte) value;
break;
} else {
writeBuffer[posn++] = (byte)(0x80 | (value & 0x7f));
value >>>= 7;
}
}
output.write(writeBuffer, 0, posn);
}
public void writeVslong(OutputStream output,
long value) throws IOException {
writeVulong(output, (value << 1) ^ (value >> 63));
}
public static long readVulong(InputStream in) throws IOException {
long result = 0;
long b;
int offset = 0;
do {
b = in.read();
if (b == -1) {
throw new EOFException("Reading Vulong past EOF");
}
result |= (0x7f & b) << offset;
offset += 7;
} while (b >= 0x80);
return result;
}
public static long readVslong(InputStream in) throws IOException {
long result = readVulong(in);
return (result >>> 1) ^ -(result & 1);
}
public float readFloat(InputStream in) throws IOException {
readFully(in, readBuffer, 4);
int val = (((readBuffer[0] & 0xff) << 0)
+ ((readBuffer[1] & 0xff) << 8)
+ ((readBuffer[2] & 0xff) << 16)
+ ((readBuffer[3] & 0xff) << 24));
return Float.intBitsToFloat(val);
}
public void skipFloat(InputStream in, int numOfFloats) throws IOException {
IOUtils.skipFully(in, numOfFloats * 4L);
}
public void writeFloat(OutputStream output,
float value) throws IOException {
int ser = Float.floatToIntBits(value);
writeBuffer[0] = (byte) ((ser >> 0) & 0xff);
writeBuffer[1] = (byte) ((ser >> 8) & 0xff);
writeBuffer[2] = (byte) ((ser >> 16) & 0xff);
writeBuffer[3] = (byte) ((ser >> 24) & 0xff);
output.write(writeBuffer, 0, 4);
}
public double readDouble(InputStream in) throws IOException {
return Double.longBitsToDouble(readLongLE(in));
}
public long readLongLE(InputStream in) throws IOException {
readFully(in, readBuffer, 8);
return (((readBuffer[0] & 0xff) << 0)
+ ((readBuffer[1] & 0xff) << 8)
+ ((readBuffer[2] & 0xff) << 16)
+ ((long) (readBuffer[3] & 0xff) << 24)
+ ((long) (readBuffer[4] & 0xff) << 32)
+ ((long) (readBuffer[5] & 0xff) << 40)
+ ((long) (readBuffer[6] & 0xff) << 48)
+ ((long) (readBuffer[7] & 0xff) << 56));
}
private void readFully(final InputStream in, final byte[] buffer, int len)
throws IOException {
int offset = 0;
for (;;) {
final int n = in.read(buffer, offset, len);
if (n == len) {
return;
}
if (n < 0) {
throw new EOFException("Read past EOF for " + in);
}
offset += n;
len -= n;
}
}
public void skipDouble(InputStream in, int numOfDoubles) throws IOException {
IOUtils.skipFully(in, numOfDoubles * 8L);
}
public void writeDouble(OutputStream output, double value)
throws IOException {
final long bits = Double.doubleToLongBits(value);
final int first = (int) (bits & 0xFFFFFFFF);
final int second = (int) ((bits >>> 32) & 0xFFFFFFFF);
// Implementation taken from Apache Avro (org.apache.avro.io.BinaryData)
// the compiler seems to execute this order the best, likely due to
// register allocation -- the lifetime of constants is minimized.
writeBuffer[0] = (byte) (first);
writeBuffer[4] = (byte) (second);
writeBuffer[5] = (byte) (second >>> 8);
writeBuffer[1] = (byte) (first >>> 8);
writeBuffer[2] = (byte) (first >>> 16);
writeBuffer[6] = (byte) (second >>> 16);
writeBuffer[7] = (byte) (second >>> 24);
writeBuffer[3] = (byte) (first >>> 24);
output.write(writeBuffer, 0, 8);
}
/**
* Write the arbitrarily sized signed BigInteger in vint format.
*
* Signed integers are encoded using the low bit as the sign bit using zigzag
* encoding.
*
* Each byte uses the low 7 bits for data and the high bit for stop/continue.
*
* Bytes are stored LSB first.
* @param output the stream to write to
* @param value the value to output
* @throws IOException
*/
public static void writeBigInteger(OutputStream output,
BigInteger value) throws IOException {
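    // Worked example of the zigzag step below: 0 encodes as 0x00, -1 as 0x01,
    // 1 as 0x02 and -2 as 0x03, each in a single byte since they fit in 7 bits.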
// encode the signed number as a positive integer
value = value.shiftLeft(1);
int sign = value.signum();
if (sign < 0) {
value = value.negate();
value = value.subtract(BigInteger.ONE);
}
int length = value.bitLength();
while (true) {
long lowBits = value.longValue() & 0x7fffffffffffffffL;
length -= 63;
// write out the next 63 bits worth of data
for(int i=0; i < 9; ++i) {
// if this is the last byte, leave the high bit off
if (length <= 0 && (lowBits & ~0x7f) == 0) {
output.write((byte) lowBits);
return;
} else {
output.write((byte) (0x80 | (lowBits & 0x7f)));
lowBits >>>= 7;
}
}
value = value.shiftRight(63);
}
}
/**
   * Read a signed, arbitrarily sized BigInteger in vint format
* @param input the stream to read from
* @return the read BigInteger
* @throws IOException
*/
public static BigInteger readBigInteger(InputStream input) throws IOException {
BigInteger result = BigInteger.ZERO;
long work = 0;
int offset = 0;
long b;
do {
b = input.read();
if (b == -1) {
throw new EOFException("Reading BigInteger past EOF from " + input);
}
work |= (0x7f & b) << (offset % 63);
offset += 7;
// if we've read 63 bits, roll them into the result
if (offset == 63) {
result = BigInteger.valueOf(work);
work = 0;
} else if (offset % 63 == 0) {
result = result.or(BigInteger.valueOf(work).shiftLeft(offset-63));
work = 0;
}
} while (b >= 0x80);
if (work != 0) {
result = result.or(BigInteger.valueOf(work).shiftLeft((offset/63)*63));
}
// convert back to a signed number
boolean isNegative = result.testBit(0);
if (isNegative) {
result = result.add(BigInteger.ONE);
result = result.negate();
}
result = result.shiftRight(1);
return result;
}
public enum FixedBitSizes {
ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, TEN, ELEVEN, TWELVE,
THIRTEEN, FOURTEEN, FIFTEEN, SIXTEEN, SEVENTEEN, EIGHTEEN, NINETEEN,
TWENTY, TWENTYONE, TWENTYTWO, TWENTYTHREE, TWENTYFOUR, TWENTYSIX,
TWENTYEIGHT, THIRTY, THIRTYTWO, FORTY, FORTYEIGHT, FIFTYSIX, SIXTYFOUR;
}
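  // Encoded bit widths: ordinals 0-23 stand for widths 1-24 directly; wider
  // values are rounded up to one of 26, 28, 30, 32, 40, 48, 56 or 64 (see
  // encodeBitWidth/decodeBitWidth below).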
/**
* Count the number of bits required to encode the given value
* @param value
* @return bits required to store value
*/
public int findClosestNumBits(long value) {
return getClosestFixedBits(findNumBits(value));
}
private int findNumBits(long value) {
return 64 - Long.numberOfLeadingZeros(value);
}
/**
* zigzag encode the given value
* @param val
* @return zigzag encoded value
*/
public long zigzagEncode(long val) {
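    // Maps small magnitudes to small unsigned values:
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...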
return (val << 1) ^ (val >> 63);
}
/**
* zigzag decode the given value
* @param val
   * @return zigzag decoded value
*/
public long zigzagDecode(long val) {
return (val >>> 1) ^ -(val & 1);
}
/**
* Compute the bits required to represent pth percentile value
* @param data - array
   * @param p - percentile value, must be in (0.0, 1.0]
* @return pth percentile bits
*/
public int percentileBits(long[] data, int offset, int length, double p) {
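    // Roughly, up to length * (1 - p) values are allowed to exceed the
    // returned width. For example, with 20 values where 19 fit in 4 bits and
    // one needs 20 bits, p = 1.0 returns 20 while p = 0.9 ignores the single
    // outlier and returns 4.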
if ((p > 1.0) || (p <= 0.0)) {
return -1;
}
Arrays.fill(this.histBuffer, 0);
// compute the histogram
for(int i = offset; i < (offset + length); i++) {
int idx = encodeBitWidth(findNumBits(data[i]));
this.histBuffer[idx] += 1;
}
int perLen = (int) (length * (1.0 - p));
// return the bits required by pth percentile length
for(int i = this.histBuffer.length - 1; i >= 0; i--) {
perLen -= this.histBuffer[i];
if (perLen < 0) {
return decodeBitWidth(i);
}
}
return 0;
}
/**
* Read n bytes in big endian order and convert to long
* @return long value
*/
public long bytesToLongBE(InStream input, int n) throws IOException {
long out = 0;
long val = 0;
while (n > 0) {
n--;
// store it in a long and then shift else integer overflow will occur
val = input.read();
out |= (val << (n * 8));
}
return out;
}
/**
* Calculate the number of bytes required
* @param n - number of values
* @param numBits - bit width
* @return number of bytes required
*/
int getTotalBytesRequired(int n, int numBits) {
return (n * numBits + 7) / 8;
}
/**
* For a given fixed bit this function will return the closest available fixed
* bit
* @param n
* @return closest valid fixed bit
*/
public int getClosestFixedBits(int n) {
if (n == 0) {
return 1;
}
if (n <= 24) {
return n;
}
if (n <= 26) {
return 26;
}
if (n <= 28) {
return 28;
}
if (n <= 30) {
return 30;
}
if (n <= 32) {
return 32;
}
if (n <= 40) {
return 40;
}
if (n <= 48) {
return 48;
}
if (n <= 56) {
return 56;
}
return 64;
}
public int getClosestAlignedFixedBits(int n) {
if (n == 0 || n == 1) {
return 1;
} else if (n > 1 && n <= 2) {
return 2;
} else if (n > 2 && n <= 4) {
return 4;
} else if (n > 4 && n <= 8) {
return 8;
} else if (n > 8 && n <= 16) {
return 16;
} else if (n > 16 && n <= 24) {
return 24;
} else if (n > 24 && n <= 32) {
return 32;
} else if (n > 32 && n <= 40) {
return 40;
} else if (n > 40 && n <= 48) {
return 48;
} else if (n > 48 && n <= 56) {
return 56;
} else {
return 64;
}
}
/**
* Finds the closest available fixed bit width match and returns its encoded
* value (ordinal).
*
* @param n fixed bit width to encode
* @return encoded fixed bit width
*/
public int encodeBitWidth(int n) {
n = getClosestFixedBits(n);
if (n >= 1 && n <= 24) {
return n - 1;
}
if (n <= 26) {
return FixedBitSizes.TWENTYSIX.ordinal();
}
if (n <= 28) {
return FixedBitSizes.TWENTYEIGHT.ordinal();
}
if (n <= 30) {
return FixedBitSizes.THIRTY.ordinal();
}
if (n <= 32) {
return FixedBitSizes.THIRTYTWO.ordinal();
}
if (n <= 40) {
return FixedBitSizes.FORTY.ordinal();
}
if (n <= 48) {
return FixedBitSizes.FORTYEIGHT.ordinal();
}
if (n <= 56) {
return FixedBitSizes.FIFTYSIX.ordinal();
}
return FixedBitSizes.SIXTYFOUR.ordinal();
}
/**
* Decodes the ordinal fixed bit value to actual fixed bit width value
* @param n - encoded fixed bit width
* @return decoded fixed bit width
*/
public static int decodeBitWidth(int n) {
if (n >= FixedBitSizes.ONE.ordinal() &&
n <= FixedBitSizes.TWENTYFOUR.ordinal()) {
return n + 1;
} else if (n == FixedBitSizes.TWENTYSIX.ordinal()) {
return 26;
} else if (n == FixedBitSizes.TWENTYEIGHT.ordinal()) {
return 28;
} else if (n == FixedBitSizes.THIRTY.ordinal()) {
return 30;
} else if (n == FixedBitSizes.THIRTYTWO.ordinal()) {
return 32;
} else if (n == FixedBitSizes.FORTY.ordinal()) {
return 40;
} else if (n == FixedBitSizes.FORTYEIGHT.ordinal()) {
return 48;
} else if (n == FixedBitSizes.FIFTYSIX.ordinal()) {
return 56;
} else {
return 64;
}
}
/**
* Bitpack and write the input values to underlying output stream
* @param input - values to write
* @param offset - offset
* @param len - length
* @param bitSize - bit width
* @param output - output stream
* @throws IOException
*/
public void writeInts(long[] input, int offset, int len, int bitSize,
OutputStream output) throws IOException {
if (input == null || input.length < 1 || offset < 0 || len < 1 || bitSize < 1) {
return;
}
switch (bitSize) {
case 1:
unrolledBitPack1(input, offset, len, output);
return;
case 2:
unrolledBitPack2(input, offset, len, output);
return;
case 4:
unrolledBitPack4(input, offset, len, output);
return;
case 8:
unrolledBitPack8(input, offset, len, output);
return;
case 16:
unrolledBitPack16(input, offset, len, output);
return;
case 24:
unrolledBitPack24(input, offset, len, output);
return;
case 32:
unrolledBitPack32(input, offset, len, output);
return;
case 40:
unrolledBitPack40(input, offset, len, output);
return;
case 48:
unrolledBitPack48(input, offset, len, output);
return;
case 56:
unrolledBitPack56(input, offset, len, output);
return;
case 64:
unrolledBitPack64(input, offset, len, output);
return;
default:
break;
}
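    // General (non-byte-aligned) path: values are packed MSB first across byte
    // boundaries. For example, at bitSize 3 the values 5 (101) and 7 (111)
    // pack into the single byte 0b10111100, which the final flush emits.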
int bitsLeft = 8;
byte current = 0;
for(int i = offset; i < (offset + len); i++) {
long value = input[i];
int bitsToWrite = bitSize;
while (bitsToWrite > bitsLeft) {
// add the bits to the bottom of the current word
current |= value >>> (bitsToWrite - bitsLeft);
// subtract out the bits we just added
bitsToWrite -= bitsLeft;
// zero out the bits above bitsToWrite
value &= (1L << bitsToWrite) - 1;
output.write(current);
current = 0;
bitsLeft = 8;
}
bitsLeft -= bitsToWrite;
current |= value << bitsLeft;
if (bitsLeft == 0) {
output.write(current);
current = 0;
bitsLeft = 8;
}
}
// flush
if (bitsLeft != 8) {
output.write(current);
current = 0;
bitsLeft = 8;
}
}
private void unrolledBitPack1(long[] input, int offset, int len,
OutputStream output) throws IOException {
final int numHops = 8;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int val = 0;
for (int i = offset; i < endUnroll; i = i + numHops) {
val = (int) (val | ((input[i] & 1) << 7)
| ((input[i + 1] & 1) << 6)
| ((input[i + 2] & 1) << 5)
| ((input[i + 3] & 1) << 4)
| ((input[i + 4] & 1) << 3)
| ((input[i + 5] & 1) << 2)
| ((input[i + 6] & 1) << 1)
| (input[i + 7]) & 1);
output.write(val);
val = 0;
}
if (remainder > 0) {
int startShift = 7;
for (int i = endUnroll; i < endOffset; i++) {
val = (int) (val | (input[i] & 1) << startShift);
startShift -= 1;
}
output.write(val);
}
}
private void unrolledBitPack2(long[] input, int offset, int len,
OutputStream output) throws IOException {
final int numHops = 4;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int val = 0;
for (int i = offset; i < endUnroll; i = i + numHops) {
val = (int) (val | ((input[i] & 3) << 6)
| ((input[i + 1] & 3) << 4)
| ((input[i + 2] & 3) << 2)
| (input[i + 3]) & 3);
output.write(val);
val = 0;
}
if (remainder > 0) {
int startShift = 6;
for (int i = endUnroll; i < endOffset; i++) {
val = (int) (val | (input[i] & 3) << startShift);
startShift -= 2;
}
output.write(val);
}
}
private void unrolledBitPack4(long[] input, int offset, int len,
OutputStream output) throws IOException {
final int numHops = 2;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int val = 0;
for (int i = offset; i < endUnroll; i = i + numHops) {
val = (int) (val | ((input[i] & 15) << 4) | (input[i + 1]) & 15);
output.write(val);
val = 0;
}
if (remainder > 0) {
int startShift = 4;
for (int i = endUnroll; i < endOffset; i++) {
val = (int) (val | (input[i] & 15) << startShift);
startShift -= 4;
}
output.write(val);
}
}
private void unrolledBitPack8(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 1);
}
private void unrolledBitPack16(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 2);
}
private void unrolledBitPack24(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 3);
}
private void unrolledBitPack32(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 4);
}
private void unrolledBitPack40(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 5);
}
private void unrolledBitPack48(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 6);
}
private void unrolledBitPack56(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 7);
}
private void unrolledBitPack64(long[] input, int offset, int len,
OutputStream output) throws IOException {
unrolledBitPackBytes(input, offset, len, output, 8);
}
private void unrolledBitPackBytes(long[] input, int offset, int len,
OutputStream output, int numBytes) throws IOException {
final int numHops = 8;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int i = offset;
for (; i < endUnroll; i = i + numHops) {
writeLongBE(output, input, i, numHops, numBytes);
}
if (remainder > 0) {
writeRemainingLongs(output, i, input, remainder, numBytes);
}
}
private void writeRemainingLongs(OutputStream output, int offset, long[] input, int remainder,
int numBytes) throws IOException {
final int numHops = remainder;
int idx = 0;
switch (numBytes) {
case 1:
while (remainder > 0) {
writeBuffer[idx] = (byte) (input[offset + idx] & 255);
remainder--;
idx++;
}
break;
case 2:
while (remainder > 0) {
writeLongBE2(output, input[offset + idx], idx * 2);
remainder--;
idx++;
}
break;
case 3:
while (remainder > 0) {
writeLongBE3(output, input[offset + idx], idx * 3);
remainder--;
idx++;
}
break;
case 4:
while (remainder > 0) {
writeLongBE4(output, input[offset + idx], idx * 4);
remainder--;
idx++;
}
break;
case 5:
while (remainder > 0) {
writeLongBE5(output, input[offset + idx], idx * 5);
remainder--;
idx++;
}
break;
case 6:
while (remainder > 0) {
writeLongBE6(output, input[offset + idx], idx * 6);
remainder--;
idx++;
}
break;
case 7:
while (remainder > 0) {
writeLongBE7(output, input[offset + idx], idx * 7);
remainder--;
idx++;
}
break;
case 8:
while (remainder > 0) {
writeLongBE8(output, input[offset + idx], idx * 8);
remainder--;
idx++;
}
break;
default:
break;
}
final int toWrite = numHops * numBytes;
output.write(writeBuffer, 0, toWrite);
}
private void writeLongBE(OutputStream output, long[] input, int offset,
int numHops, int numBytes) throws IOException {
switch (numBytes) {
case 1:
writeBuffer[0] = (byte) (input[offset + 0] & 255);
writeBuffer[1] = (byte) (input[offset + 1] & 255);
writeBuffer[2] = (byte) (input[offset + 2] & 255);
writeBuffer[3] = (byte) (input[offset + 3] & 255);
writeBuffer[4] = (byte) (input[offset + 4] & 255);
writeBuffer[5] = (byte) (input[offset + 5] & 255);
writeBuffer[6] = (byte) (input[offset + 6] & 255);
writeBuffer[7] = (byte) (input[offset + 7] & 255);
break;
case 2:
writeLongBE2(output, input[offset + 0], 0);
writeLongBE2(output, input[offset + 1], 2);
writeLongBE2(output, input[offset + 2], 4);
writeLongBE2(output, input[offset + 3], 6);
writeLongBE2(output, input[offset + 4], 8);
writeLongBE2(output, input[offset + 5], 10);
writeLongBE2(output, input[offset + 6], 12);
writeLongBE2(output, input[offset + 7], 14);
break;
case 3:
writeLongBE3(output, input[offset + 0], 0);
writeLongBE3(output, input[offset + 1], 3);
writeLongBE3(output, input[offset + 2], 6);
writeLongBE3(output, input[offset + 3], 9);
writeLongBE3(output, input[offset + 4], 12);
writeLongBE3(output, input[offset + 5], 15);
writeLongBE3(output, input[offset + 6], 18);
writeLongBE3(output, input[offset + 7], 21);
break;
case 4:
writeLongBE4(output, input[offset + 0], 0);
writeLongBE4(output, input[offset + 1], 4);
writeLongBE4(output, input[offset + 2], 8);
writeLongBE4(output, input[offset + 3], 12);
writeLongBE4(output, input[offset + 4], 16);
writeLongBE4(output, input[offset + 5], 20);
writeLongBE4(output, input[offset + 6], 24);
writeLongBE4(output, input[offset + 7], 28);
break;
case 5:
writeLongBE5(output, input[offset + 0], 0);
writeLongBE5(output, input[offset + 1], 5);
writeLongBE5(output, input[offset + 2], 10);
writeLongBE5(output, input[offset + 3], 15);
writeLongBE5(output, input[offset + 4], 20);
writeLongBE5(output, input[offset + 5], 25);
writeLongBE5(output, input[offset + 6], 30);
writeLongBE5(output, input[offset + 7], 35);
break;
case 6:
writeLongBE6(output, input[offset + 0], 0);
writeLongBE6(output, input[offset + 1], 6);
writeLongBE6(output, input[offset + 2], 12);
writeLongBE6(output, input[offset + 3], 18);
writeLongBE6(output, input[offset + 4], 24);
writeLongBE6(output, input[offset + 5], 30);
writeLongBE6(output, input[offset + 6], 36);
writeLongBE6(output, input[offset + 7], 42);
break;
case 7:
writeLongBE7(output, input[offset + 0], 0);
writeLongBE7(output, input[offset + 1], 7);
writeLongBE7(output, input[offset + 2], 14);
writeLongBE7(output, input[offset + 3], 21);
writeLongBE7(output, input[offset + 4], 28);
writeLongBE7(output, input[offset + 5], 35);
writeLongBE7(output, input[offset + 6], 42);
writeLongBE7(output, input[offset + 7], 49);
break;
case 8:
writeLongBE8(output, input[offset + 0], 0);
writeLongBE8(output, input[offset + 1], 8);
writeLongBE8(output, input[offset + 2], 16);
writeLongBE8(output, input[offset + 3], 24);
writeLongBE8(output, input[offset + 4], 32);
writeLongBE8(output, input[offset + 5], 40);
writeLongBE8(output, input[offset + 6], 48);
writeLongBE8(output, input[offset + 7], 56);
break;
default:
break;
}
final int toWrite = numHops * numBytes;
output.write(writeBuffer, 0, toWrite);
}
private void writeLongBE2(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 8);
writeBuffer[wbOffset + 1] = (byte) (val >>> 0);
}
private void writeLongBE3(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 16);
writeBuffer[wbOffset + 1] = (byte) (val >>> 8);
writeBuffer[wbOffset + 2] = (byte) (val >>> 0);
}
private void writeLongBE4(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 24);
writeBuffer[wbOffset + 1] = (byte) (val >>> 16);
writeBuffer[wbOffset + 2] = (byte) (val >>> 8);
writeBuffer[wbOffset + 3] = (byte) (val >>> 0);
}
private void writeLongBE5(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 32);
writeBuffer[wbOffset + 1] = (byte) (val >>> 24);
writeBuffer[wbOffset + 2] = (byte) (val >>> 16);
writeBuffer[wbOffset + 3] = (byte) (val >>> 8);
writeBuffer[wbOffset + 4] = (byte) (val >>> 0);
}
private void writeLongBE6(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 40);
writeBuffer[wbOffset + 1] = (byte) (val >>> 32);
writeBuffer[wbOffset + 2] = (byte) (val >>> 24);
writeBuffer[wbOffset + 3] = (byte) (val >>> 16);
writeBuffer[wbOffset + 4] = (byte) (val >>> 8);
writeBuffer[wbOffset + 5] = (byte) (val >>> 0);
}
private void writeLongBE7(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 48);
writeBuffer[wbOffset + 1] = (byte) (val >>> 40);
writeBuffer[wbOffset + 2] = (byte) (val >>> 32);
writeBuffer[wbOffset + 3] = (byte) (val >>> 24);
writeBuffer[wbOffset + 4] = (byte) (val >>> 16);
writeBuffer[wbOffset + 5] = (byte) (val >>> 8);
writeBuffer[wbOffset + 6] = (byte) (val >>> 0);
}
private void writeLongBE8(OutputStream output, long val, int wbOffset) {
writeBuffer[wbOffset + 0] = (byte) (val >>> 56);
writeBuffer[wbOffset + 1] = (byte) (val >>> 48);
writeBuffer[wbOffset + 2] = (byte) (val >>> 40);
writeBuffer[wbOffset + 3] = (byte) (val >>> 32);
writeBuffer[wbOffset + 4] = (byte) (val >>> 24);
writeBuffer[wbOffset + 5] = (byte) (val >>> 16);
writeBuffer[wbOffset + 6] = (byte) (val >>> 8);
writeBuffer[wbOffset + 7] = (byte) (val >>> 0);
}
/**
* Read bitpacked integers from input stream
* @param buffer - input buffer
* @param offset - offset
* @param len - length
* @param bitSize - bit width
* @param input - input stream
* @throws IOException
*/
public void readInts(long[] buffer, int offset, int len, int bitSize,
InStream input) throws IOException {
int bitsLeft = 0;
int current = 0;
switch (bitSize) {
case 1:
unrolledUnPack1(buffer, offset, len, input);
return;
case 2:
unrolledUnPack2(buffer, offset, len, input);
return;
case 4:
unrolledUnPack4(buffer, offset, len, input);
return;
case 8:
unrolledUnPack8(buffer, offset, len, input);
return;
case 16:
unrolledUnPack16(buffer, offset, len, input);
return;
case 24:
unrolledUnPack24(buffer, offset, len, input);
return;
case 32:
unrolledUnPack32(buffer, offset, len, input);
return;
case 40:
unrolledUnPack40(buffer, offset, len, input);
return;
case 48:
unrolledUnPack48(buffer, offset, len, input);
return;
case 56:
unrolledUnPack56(buffer, offset, len, input);
return;
case 64:
unrolledUnPack64(buffer, offset, len, input);
return;
default:
break;
}
for(int i = offset; i < (offset + len); i++) {
long result = 0;
int bitsLeftToRead = bitSize;
while (bitsLeftToRead > bitsLeft) {
result <<= bitsLeft;
result |= current & ((1 << bitsLeft) - 1);
bitsLeftToRead -= bitsLeft;
current = input.read();
bitsLeft = 8;
}
// handle the left over bits
if (bitsLeftToRead > 0) {
result <<= bitsLeftToRead;
bitsLeft -= bitsLeftToRead;
result |= (current >> bitsLeft) & ((1 << bitsLeftToRead) - 1);
}
buffer[i] = result;
}
}
private void unrolledUnPack1(long[] buffer, int offset, int len,
InStream input) throws IOException {
final int numHops = 8;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int val = 0;
for (int i = offset; i < endUnroll; i = i + numHops) {
val = input.read();
buffer[i] = (val >>> 7) & 1;
buffer[i + 1] = (val >>> 6) & 1;
buffer[i + 2] = (val >>> 5) & 1;
buffer[i + 3] = (val >>> 4) & 1;
buffer[i + 4] = (val >>> 3) & 1;
buffer[i + 5] = (val >>> 2) & 1;
buffer[i + 6] = (val >>> 1) & 1;
buffer[i + 7] = val & 1;
}
if (remainder > 0) {
int startShift = 7;
val = input.read();
for (int i = endUnroll; i < endOffset; i++) {
buffer[i] = (val >>> startShift) & 1;
startShift -= 1;
}
}
}
private void unrolledUnPack2(long[] buffer, int offset, int len,
InStream input) throws IOException {
final int numHops = 4;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int val = 0;
for (int i = offset; i < endUnroll; i = i + numHops) {
val = input.read();
buffer[i] = (val >>> 6) & 3;
buffer[i + 1] = (val >>> 4) & 3;
buffer[i + 2] = (val >>> 2) & 3;
buffer[i + 3] = val & 3;
}
if (remainder > 0) {
int startShift = 6;
val = input.read();
for (int i = endUnroll; i < endOffset; i++) {
buffer[i] = (val >>> startShift) & 3;
startShift -= 2;
}
}
}
private void unrolledUnPack4(long[] buffer, int offset, int len,
InStream input) throws IOException {
final int numHops = 2;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int val = 0;
for (int i = offset; i < endUnroll; i = i + numHops) {
val = input.read();
buffer[i] = (val >>> 4) & 15;
buffer[i + 1] = val & 15;
}
if (remainder > 0) {
int startShift = 4;
val = input.read();
for (int i = endUnroll; i < endOffset; i++) {
buffer[i] = (val >>> startShift) & 15;
startShift -= 4;
}
}
}
private void unrolledUnPack8(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 1);
}
private void unrolledUnPack16(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 2);
}
private void unrolledUnPack24(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 3);
}
private void unrolledUnPack32(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 4);
}
private void unrolledUnPack40(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 5);
}
private void unrolledUnPack48(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 6);
}
private void unrolledUnPack56(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 7);
}
private void unrolledUnPack64(long[] buffer, int offset, int len,
InStream input) throws IOException {
unrolledUnPackBytes(buffer, offset, len, input, 8);
}
private void unrolledUnPackBytes(long[] buffer, int offset, int len, InStream input, int numBytes)
throws IOException {
final int numHops = 8;
final int remainder = len % numHops;
final int endOffset = offset + len;
final int endUnroll = endOffset - remainder;
int i = offset;
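    // Decode eight values per iteration; each value occupies numBytes bytes in big-endian order.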
for (; i < endUnroll; i = i + numHops) {
readLongBE(input, buffer, i, numHops, numBytes);
}
if (remainder > 0) {
readRemainingLongs(buffer, i, input, remainder, numBytes);
}
}
private void readRemainingLongs(long[] buffer, int offset, InStream input, int remainder,
int numBytes) throws IOException {
final int toRead = remainder * numBytes;
// bulk read to buffer
int bytesRead = input.read(readBuffer, 0, toRead);
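    // read() may return fewer bytes than requested, so loop until the whole run is buffered.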
while (bytesRead != toRead) {
bytesRead += input.read(readBuffer, bytesRead, toRead - bytesRead);
}
int idx = 0;
switch (numBytes) {
case 1:
while (remainder > 0) {
buffer[offset++] = readBuffer[idx] & 255;
remainder--;
idx++;
}
break;
case 2:
while (remainder > 0) {
buffer[offset++] = readLongBE2(input, idx * 2);
remainder--;
idx++;
}
break;
case 3:
while (remainder > 0) {
buffer[offset++] = readLongBE3(input, idx * 3);
remainder--;
idx++;
}
break;
case 4:
while (remainder > 0) {
buffer[offset++] = readLongBE4(input, idx * 4);
remainder--;
idx++;
}
break;
case 5:
while (remainder > 0) {
buffer[offset++] = readLongBE5(input, idx * 5);
remainder--;
idx++;
}
break;
case 6:
while (remainder > 0) {
buffer[offset++] = readLongBE6(input, idx * 6);
remainder--;
idx++;
}
break;
case 7:
while (remainder > 0) {
buffer[offset++] = readLongBE7(input, idx * 7);
remainder--;
idx++;
}
break;
case 8:
while (remainder > 0) {
buffer[offset++] = readLongBE8(input, idx * 8);
remainder--;
idx++;
}
break;
default:
break;
}
}
private void readLongBE(InStream in, long[] buffer, int start, int numHops, int numBytes)
throws IOException {
final int toRead = numHops * numBytes;
// bulk read to buffer
int bytesRead = in.read(readBuffer, 0, toRead);
while (bytesRead != toRead) {
bytesRead += in.read(readBuffer, bytesRead, toRead - bytesRead);
}
switch (numBytes) {
case 1:
buffer[start + 0] = readBuffer[0] & 255;
buffer[start + 1] = readBuffer[1] & 255;
buffer[start + 2] = readBuffer[2] & 255;
buffer[start + 3] = readBuffer[3] & 255;
buffer[start + 4] = readBuffer[4] & 255;
buffer[start + 5] = readBuffer[5] & 255;
buffer[start + 6] = readBuffer[6] & 255;
buffer[start + 7] = readBuffer[7] & 255;
break;
case 2:
buffer[start + 0] = readLongBE2(in, 0);
buffer[start + 1] = readLongBE2(in, 2);
buffer[start + 2] = readLongBE2(in, 4);
buffer[start + 3] = readLongBE2(in, 6);
buffer[start + 4] = readLongBE2(in, 8);
buffer[start + 5] = readLongBE2(in, 10);
buffer[start + 6] = readLongBE2(in, 12);
buffer[start + 7] = readLongBE2(in, 14);
break;
case 3:
buffer[start + 0] = readLongBE3(in, 0);
buffer[start + 1] = readLongBE3(in, 3);
buffer[start + 2] = readLongBE3(in, 6);
buffer[start + 3] = readLongBE3(in, 9);
buffer[start + 4] = readLongBE3(in, 12);
buffer[start + 5] = readLongBE3(in, 15);
buffer[start + 6] = readLongBE3(in, 18);
buffer[start + 7] = readLongBE3(in, 21);
break;
case 4:
buffer[start + 0] = readLongBE4(in, 0);
buffer[start + 1] = readLongBE4(in, 4);
buffer[start + 2] = readLongBE4(in, 8);
buffer[start + 3] = readLongBE4(in, 12);
buffer[start + 4] = readLongBE4(in, 16);
buffer[start + 5] = readLongBE4(in, 20);
buffer[start + 6] = readLongBE4(in, 24);
buffer[start + 7] = readLongBE4(in, 28);
break;
case 5:
buffer[start + 0] = readLongBE5(in, 0);
buffer[start + 1] = readLongBE5(in, 5);
buffer[start + 2] = readLongBE5(in, 10);
buffer[start + 3] = readLongBE5(in, 15);
buffer[start + 4] = readLongBE5(in, 20);
buffer[start + 5] = readLongBE5(in, 25);
buffer[start + 6] = readLongBE5(in, 30);
buffer[start + 7] = readLongBE5(in, 35);
break;
case 6:
buffer[start + 0] = readLongBE6(in, 0);
buffer[start + 1] = readLongBE6(in, 6);
buffer[start + 2] = readLongBE6(in, 12);
buffer[start + 3] = readLongBE6(in, 18);
buffer[start + 4] = readLongBE6(in, 24);
buffer[start + 5] = readLongBE6(in, 30);
buffer[start + 6] = readLongBE6(in, 36);
buffer[start + 7] = readLongBE6(in, 42);
break;
case 7:
buffer[start + 0] = readLongBE7(in, 0);
buffer[start + 1] = readLongBE7(in, 7);
buffer[start + 2] = readLongBE7(in, 14);
buffer[start + 3] = readLongBE7(in, 21);
buffer[start + 4] = readLongBE7(in, 28);
buffer[start + 5] = readLongBE7(in, 35);
buffer[start + 6] = readLongBE7(in, 42);
buffer[start + 7] = readLongBE7(in, 49);
break;
case 8:
buffer[start + 0] = readLongBE8(in, 0);
buffer[start + 1] = readLongBE8(in, 8);
buffer[start + 2] = readLongBE8(in, 16);
buffer[start + 3] = readLongBE8(in, 24);
buffer[start + 4] = readLongBE8(in, 32);
buffer[start + 5] = readLongBE8(in, 40);
buffer[start + 6] = readLongBE8(in, 48);
buffer[start + 7] = readLongBE8(in, 56);
break;
default:
break;
}
}
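  // The readLongBE{2..8} helpers below assemble a big-endian value of the given width from
  // readBuffer; the InStream argument is not used by them.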
private long readLongBE2(InStream in, int rbOffset) {
return (((readBuffer[rbOffset] & 255) << 8)
+ ((readBuffer[rbOffset + 1] & 255) << 0));
}
private long readLongBE3(InStream in, int rbOffset) {
return (((readBuffer[rbOffset] & 255) << 16)
+ ((readBuffer[rbOffset + 1] & 255) << 8)
+ ((readBuffer[rbOffset + 2] & 255) << 0));
}
private long readLongBE4(InStream in, int rbOffset) {
return (((long) (readBuffer[rbOffset] & 255) << 24)
+ ((readBuffer[rbOffset + 1] & 255) << 16)
+ ((readBuffer[rbOffset + 2] & 255) << 8)
+ ((readBuffer[rbOffset + 3] & 255) << 0));
}
private long readLongBE5(InStream in, int rbOffset) {
return (((long) (readBuffer[rbOffset] & 255) << 32)
+ ((long) (readBuffer[rbOffset + 1] & 255) << 24)
+ ((readBuffer[rbOffset + 2] & 255) << 16)
+ ((readBuffer[rbOffset + 3] & 255) << 8)
+ ((readBuffer[rbOffset + 4] & 255) << 0));
}
private long readLongBE6(InStream in, int rbOffset) {
return (((long) (readBuffer[rbOffset] & 255) << 40)
+ ((long) (readBuffer[rbOffset + 1] & 255) << 32)
+ ((long) (readBuffer[rbOffset + 2] & 255) << 24)
+ ((readBuffer[rbOffset + 3] & 255) << 16)
+ ((readBuffer[rbOffset + 4] & 255) << 8)
+ ((readBuffer[rbOffset + 5] & 255) << 0));
}
private long readLongBE7(InStream in, int rbOffset) {
return (((long) (readBuffer[rbOffset] & 255) << 48)
+ ((long) (readBuffer[rbOffset + 1] & 255) << 40)
+ ((long) (readBuffer[rbOffset + 2] & 255) << 32)
+ ((long) (readBuffer[rbOffset + 3] & 255) << 24)
+ ((readBuffer[rbOffset + 4] & 255) << 16)
+ ((readBuffer[rbOffset + 5] & 255) << 8)
+ ((readBuffer[rbOffset + 6] & 255) << 0));
}
private long readLongBE8(InStream in, int rbOffset) {
return (((long) (readBuffer[rbOffset] & 255) << 56)
+ ((long) (readBuffer[rbOffset + 1] & 255) << 48)
+ ((long) (readBuffer[rbOffset + 2] & 255) << 40)
+ ((long) (readBuffer[rbOffset + 3] & 255) << 32)
+ ((long) (readBuffer[rbOffset + 4] & 255) << 24)
+ ((readBuffer[rbOffset + 5] & 255) << 16)
+ ((readBuffer[rbOffset + 6] & 255) << 8)
+ ((readBuffer[rbOffset + 7] & 255) << 0));
}
  // We avoid Guava's LongMath.checkedSubtract() here because it throws
  // ArithmeticException on overflow.
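  // left - right can only overflow when the operands have opposite signs and the result's
  // sign differs from left's sign; the two XOR tests below check exactly those conditions.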
public boolean isSafeSubtract(long left, long right) {
return (left ^ right) >= 0 || (left ^ (left - right)) >= 0;
}
/**
* Convert a UTC time to a local timezone
* @param local the local timezone
* @param time the number of seconds since 1970
* @return the converted timestamp
*/
public static double convertFromUtc(TimeZone local, double time) {
int offset = local.getOffset((long) (time*1000) - local.getRawOffset());
return time - offset / 1000.0;
}
public static long convertFromUtc(TimeZone local, long time) {
int offset = local.getOffset(time - local.getRawOffset());
return time - offset;
}
public static long convertToUtc(TimeZone local, long time) {
int offset = local.getOffset(time);
return time + offset;
}
/**
* Get the stream options with the compression tuned for the particular
* kind of stream.
* @param base the original options
* @param strategy the compression strategy
* @param kind the stream kind
* @return the tuned options or the original if it is the same
*/
public static StreamOptions getCustomizedCodec(StreamOptions base,
OrcFile.CompressionStrategy strategy,
OrcProto.Stream.Kind kind) {
if (base.getCodec() != null) {
CompressionCodec.Options options = base.getCodecOptions();
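      // Bulk data-like streams keep the strategy-driven speed and are tagged as TEXT, while
      // the small index/metadata streams always use the fastest setting with BINARY data.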
switch (kind) {
case BLOOM_FILTER:
case DATA:
case DICTIONARY_DATA:
case BLOOM_FILTER_UTF8:
options = options.copy().setData(CompressionCodec.DataKind.TEXT);
if (strategy == OrcFile.CompressionStrategy.SPEED) {
options.setSpeed(CompressionCodec.SpeedModifier.FAST);
} else {
options.setSpeed(CompressionCodec.SpeedModifier.DEFAULT);
}
break;
case LENGTH:
case DICTIONARY_COUNT:
case PRESENT:
case ROW_INDEX:
case SECONDARY:
options = options.copy()
.setSpeed(CompressionCodec.SpeedModifier.FASTEST)
.setData(CompressionCodec.DataKind.BINARY);
break;
default:
LOG.info("Missing ORC compression modifiers for " + kind);
break;
}
if (!base.getCodecOptions().equals(options)) {
StreamOptions result = new StreamOptions(base)
.withCodec(base.getCodec(), options);
return result;
}
}
return base;
}
/**
* Find the relative offset when moving between timezones at a particular
* point in time.
*
   * This is a consequence of ORC v0 and v1 writing timestamps relative to the
   * writer's local timezone. Therefore, when we read, we need to convert from
   * the writer's timezone to the reader's timezone.
*
* @param writer the timezone we are moving from
* @param reader the timezone we are moving to
* @param millis the point in time
* @return the change in milliseconds
*/
public static long convertBetweenTimezones(TimeZone writer, TimeZone reader,
long millis) {
final long writerOffset = writer.getOffset(millis);
final long readerOffset = reader.getOffset(millis);
long adjustedMillis = millis + writerOffset - readerOffset;
// If the timezone adjustment moves the millis across a DST boundary, we
// need to reevaluate the offsets.
long adjustedReader = reader.getOffset(adjustedMillis);
return writerOffset - adjustedReader;
}
/**
* Convert a bytes vector element into a String.
* @param vector the vector to use
* @param elementNum the element number to stringify
* @return a string or null if the value was null
*/
public static String bytesVectorToString(BytesColumnVector vector,
int elementNum) {
if (vector.isRepeating) {
elementNum = 0;
}
return vector.noNulls || !vector.isNull[elementNum] ?
new String(vector.vector[elementNum], vector.start[elementNum],
vector.length[elementNum], StandardCharsets.UTF_8) : null;
}
/**
* Parse a date from a string.
* @param string the date to parse (YYYY-MM-DD)
* @return the Date parsed, or null if there was a parse error.
*/
public static Date parseDateFromString(String string) {
try {
Date value = Date.valueOf(string);
return value;
} catch (IllegalArgumentException e) {
return null;
}
}
}
| 47,348 | 30.970966 | 100 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/SnappyCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import io.airlift.compress.snappy.SnappyCompressor;
import io.airlift.compress.snappy.SnappyDecompressor;
import org.apache.orc.CompressionKind;
import java.io.IOException;
import java.nio.ByteBuffer;
public class SnappyCodec extends AircompressorCodec
implements DirectDecompressionCodec{
private static final HadoopShims SHIMS = HadoopShimsFactory.get();
Boolean direct = null;
HadoopShims.DirectDecompressor decompressShim = null;
SnappyCodec() {
super(CompressionKind.SNAPPY, new SnappyCompressor(), new SnappyDecompressor());
}
@Override
public void decompress(ByteBuffer in, ByteBuffer out) throws IOException {
if(in.isDirect() && out.isDirect()) {
directDecompress(in, out);
return;
}
super.decompress(in, out);
}
@Override
public boolean isAvailable() {
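    // Lazily probe for a direct decompressor (which may require native code) and cache the
    // result so the potentially failing lookup happens at most once.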
if (direct == null) {
try {
ensureShim();
direct = (decompressShim != null);
} catch (UnsatisfiedLinkError ule) {
direct = Boolean.valueOf(false);
}
}
return direct.booleanValue();
}
@Override
public void directDecompress(ByteBuffer in, ByteBuffer out)
throws IOException {
ensureShim();
decompressShim.decompress(in, out);
out.flip(); // flip for read
}
private void ensureShim() {
if (decompressShim == null) {
decompressShim = SHIMS.getDirectDecompressor(HadoopShims.DirectCompressionType.SNAPPY);
}
}
@Override
public void reset() {
super.reset();
if (decompressShim != null) {
decompressShim.reset();
}
}
@Override
public void destroy() {
super.destroy();
if (decompressShim != null) {
decompressShim.end();
}
}
}
| 2,527 | 26.78022 | 93 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/StreamName.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.EncryptionVariant;
import org.apache.orc.OrcProto;
import org.jetbrains.annotations.NotNull;
/**
* The name of a stream within a stripe.
* <p>
* Sorted by area, encryption, column, and then kind.
*/
public class StreamName implements Comparable<StreamName> {
private final int column;
private final EncryptionVariant encryption;
private final OrcProto.Stream.Kind kind;
public enum Area {
DATA, INDEX, FOOTER
}
public StreamName(int column, OrcProto.Stream.Kind kind) {
this(column, kind, null);
}
public StreamName(int column, OrcProto.Stream.Kind kind,
EncryptionVariant encryption) {
this.column = column;
this.kind = kind;
this.encryption = encryption;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof StreamName) {
StreamName other = (StreamName) obj;
return other.column == column && other.kind == kind &&
encryption == other.encryption;
} else {
return false;
}
}
@Override
public int compareTo(@NotNull StreamName streamName) {
Area area = getArea();
Area otherArea = streamName.getArea();
if (area != otherArea) {
return otherArea.compareTo(area);
} else if (encryption != streamName.encryption) {
if (encryption == null || streamName.encryption == null) {
return encryption == null ? -1 : 1;
} else {
return encryption.getVariantId() < streamName.encryption.getVariantId()?
-1 : 1;
}
} else if (column != streamName.column) {
return column < streamName.column ? -1 : 1;
}
return kind.compareTo(streamName.kind);
}
public int getColumn() {
return column;
}
public OrcProto.Stream.Kind getKind() {
return kind;
}
public Area getArea() {
return getArea(kind);
}
public static Area getArea(OrcProto.Stream.Kind kind) {
switch (kind) {
case FILE_STATISTICS:
case STRIPE_STATISTICS:
return Area.FOOTER;
case ROW_INDEX:
case DICTIONARY_COUNT:
case BLOOM_FILTER:
case BLOOM_FILTER_UTF8:
case ENCRYPTED_INDEX:
return Area.INDEX;
default:
return Area.DATA;
}
}
/**
* Get the encryption information for this stream.
* @return the encryption information or null if it isn't encrypted
*/
public EncryptionVariant getEncryption() {
return encryption;
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("column ");
buffer.append(column);
buffer.append(" kind ");
buffer.append(kind);
if (encryption != null) {
buffer.append(" encrypt ");
buffer.append(encryption.getKeyDescription());
}
return buffer.toString();
}
@Override
public int hashCode() {
return (encryption == null ? 0 : encryption.getVariantId() * 10001) +
column * 101 + kind.getNumber();
}
}
| 3,805 | 26.781022 | 80 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/StringHashTableDictionary.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
 * A dictionary represented as a hash table. The strings are stored as UTF-8 bytes
 * with an offset for each entry. Collisions are resolved by chaining.
* <p>
* This implementation is not thread-safe.
*/
public class StringHashTableDictionary implements Dictionary {
  // contains every key ever seen, stored as bytes.
private final DynamicByteArray byteArray = new DynamicByteArray();
  // contains the starting offset (in bytes) of each key within the byte array.
private final DynamicIntArray keyOffsets;
private DynamicIntArray[] hashBuckets;
private int capacity;
private int threshold;
private float loadFactor;
private static float DEFAULT_LOAD_FACTOR = 0.75f;
/**
   * Picked based on:
   * 1. default stripe size (64MB),
* 2. an assumption that record size is around 500B,
* 3. and an assumption that there are 20% distinct keys among all keys seen within a stripe.
* We then have the following equation:
* 4096 * 0.75 (capacity without resize) * avgBucketSize * 5 (20% distinct) = 64 * 1024 * 1024 / 500
* from which we deduce avgBucketSize ~8
*/
private static final int BUCKET_SIZE = 8;
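  // Working the equation through: 64 * 1024 * 1024 / 500 ~ 134,218 keys per stripe; at 20%
  // distinct that is ~26,844 dictionary entries over 4096 * 0.75 = 3072 buckets, i.e. about
  // 8.7 entries per bucket, which motivates BUCKET_SIZE = 8.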
/**
   * The maximum array size to allocate, the same value {@link java.util.Hashtable} uses,
   * given that the stripe size could be increased beyond the default by configuring "orc.stripe.size".
*/
private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
public StringHashTableDictionary(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
public StringHashTableDictionary(int initialCapacity, float loadFactor) {
this.capacity = initialCapacity;
this.loadFactor = loadFactor;
this.keyOffsets = new DynamicIntArray(initialCapacity);
initializeHashBuckets();
this.threshold = (int)Math.min(initialCapacity * loadFactor, MAX_ARRAY_SIZE + 1);
}
/**
* Initialize the hash buckets. This will create the hash buckets if they have
* not already been created; otherwise the existing buckets will be overwritten
* (cleared).
*/
private void initializeHashBuckets() {
final DynamicIntArray[] newBuckets =
(this.hashBuckets == null) ? new DynamicIntArray[this.capacity] : this.hashBuckets;
for (int i = 0; i < this.capacity; i++) {
      // We don't need a large bucket: if we have more than a handful of collisions,
      // then the table is too small or the hash function isn't good.
newBuckets[i] = createBucket();
}
this.hashBuckets = newBuckets;
}
private DynamicIntArray createBucket() {
return new DynamicIntArray(BUCKET_SIZE);
}
@Override
public void visit(Visitor visitor)
throws IOException {
traverse(visitor, new VisitorContextImpl(this.byteArray, this.keyOffsets));
}
private void traverse(Visitor visitor, VisitorContextImpl context) throws IOException {
for (DynamicIntArray intArray : hashBuckets) {
for (int i = 0; i < intArray.size() ; i ++) {
context.setPosition(intArray.get(i));
visitor.visit(context);
}
}
}
@Override
public void clear() {
byteArray.clear();
keyOffsets.clear();
initializeHashBuckets();
}
@Override
public void getText(Text result, int positionInKeyOffset) {
DictionaryUtils.getTextInternal(result, positionInKeyOffset, this.keyOffsets, this.byteArray);
}
@Override
public ByteBuffer getText(int positionInKeyOffset) {
return DictionaryUtils.getTextInternal(positionInKeyOffset, this.keyOffsets, this.byteArray);
}
@Override
public int writeTo(OutputStream out, int position) throws IOException {
return DictionaryUtils.writeToTextInternal(out, position, this.keyOffsets, this.byteArray);
}
public int add(Text text) {
return add(text.getBytes(), 0, text.getLength());
}
@Override
public int add(final byte[] bytes, final int offset, final int length) {
resizeIfNeeded();
int index = getIndex(bytes, offset, length);
DynamicIntArray candidateArray = hashBuckets[index];
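    // Scan the bucket's chain for an existing entry with identical bytes.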
for (int i = 0; i < candidateArray.size(); i++) {
final int candidateIndex = candidateArray.get(i);
if (DictionaryUtils.equalsInternal(bytes, offset, length, candidateIndex,
this.keyOffsets, this.byteArray)) {
return candidateIndex;
}
}
    // If we make it here, there was no match, so append a new entry.
int currIdx = keyOffsets.size();
keyOffsets.add(byteArray.add(bytes, offset, length));
candidateArray.add(currIdx);
return currIdx;
}
private void resizeIfNeeded() {
if (keyOffsets.size() >= threshold) {
int oldCapacity = this.capacity;
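      // Grow to (2 * oldCapacity) + 1 (the same growth policy as java.util.Hashtable)
      // and rehash every existing key into the larger table.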
int newCapacity = (oldCapacity << 1) + 1;
this.capacity = newCapacity;
doResize(newCapacity, oldCapacity);
this.threshold = (int)Math.min(newCapacity * loadFactor, MAX_ARRAY_SIZE + 1);
}
}
@Override
public int size() {
return keyOffsets.size();
}
/**
* Compute the hash value and find the corresponding index.
*/
int getIndex(Text text) {
return getIndex(text.getBytes(), 0, text.getLength());
}
/**
* Compute the hash value and find the corresponding index.
*/
int getIndex(final byte[] bytes, final int offset, final int length) {
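    // 31-based polynomial hash over the raw bytes (the same shape as String.hashCode);
    // Math.floorMod keeps the bucket index within [0, capacity).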
int hash = 1;
for (int i = offset; i < offset + length; i++) {
hash = (31 * hash) + bytes[i];
}
return Math.floorMod(hash, capacity);
}
  // Resize the hash table and re-hash all the existing keys.
  // byteArray and keyOffsets don't have to be re-filled.
private void doResize(int newCapacity, int oldCapacity) {
DynamicIntArray[] resizedHashBuckets = new DynamicIntArray[newCapacity];
for (int i = 0; i < newCapacity; i++) {
resizedHashBuckets[i] = createBucket();
}
for (int i = 0; i < oldCapacity; i++) {
DynamicIntArray oldBucket = hashBuckets[i];
for (int j = 0; j < oldBucket.size(); j++) {
final int offset = oldBucket.get(j);
ByteBuffer text = getText(offset);
resizedHashBuckets[getIndex(text.array(),
text.position(), text.remaining())].add(oldBucket.get(j));
}
}
hashBuckets = resizedHashBuckets;
}
@Override
public long getSizeInBytes() {
long bucketTotalSize = 0L;
for (DynamicIntArray dynamicIntArray : hashBuckets) {
bucketTotalSize += dynamicIntArray.size();
}
return byteArray.getSizeInBytes() + keyOffsets.getSizeInBytes() + bucketTotalSize ;
}
}
| 7,392 | 31.283843 | 109 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/StringRedBlackTree.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
* A red-black tree that stores strings. The strings are stored as UTF-8 bytes
* and an offset for each entry.
*/
public class StringRedBlackTree extends RedBlackTree implements Dictionary {
private final DynamicByteArray byteArray = new DynamicByteArray();
private final DynamicIntArray keyOffsets;
private final Text newKey = new Text();
public StringRedBlackTree(int initialCapacity) {
super(initialCapacity);
keyOffsets = new DynamicIntArray(initialCapacity);
}
public int add(String value) {
newKey.set(value);
return addNewKey();
}
private int addNewKey() {
// if the newKey is actually new, add it to our byteArray and store the offset & length
if (add()) {
int len = newKey.getLength();
keyOffsets.add(byteArray.add(newKey.getBytes(), 0, len));
}
return lastAdd;
}
public int add(Text value) {
newKey.set(value);
return addNewKey();
}
@Override
public int add(byte[] bytes, int offset, int length) {
newKey.set(bytes, offset, length);
return addNewKey();
}
@Override
protected int compareValue(int position) {
int start = keyOffsets.get(position);
int end;
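    // An entry ends where the next entry begins, or at the end of the byte array for the last key.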
if (position + 1 == keyOffsets.size()) {
end = byteArray.size();
} else {
end = keyOffsets.get(position+1);
}
return byteArray.compare(newKey.getBytes(), 0, newKey.getLength(),
start, end - start);
}
private void recurse(int node, Dictionary.Visitor visitor,
VisitorContextImpl context) throws IOException {
if (node != NULL) {
recurse(getLeft(node), visitor, context);
context.setPosition(node);
visitor.visit(context);
recurse(getRight(node), visitor, context);
}
}
/**
* Visit all of the nodes in the tree in sorted order.
* @param visitor the action to be applied to each node
* @throws IOException
*/
@Override
public void visit(Dictionary.Visitor visitor) throws IOException {
recurse(root, visitor,
new VisitorContextImpl(this.byteArray, this.keyOffsets));
}
/**
* Reset the table to empty.
*/
@Override
public void clear() {
super.clear();
byteArray.clear();
keyOffsets.clear();
}
@Override
public void getText(Text result, int originalPosition) {
DictionaryUtils.getTextInternal(result, originalPosition, this.keyOffsets, this.byteArray);
}
@Override
public ByteBuffer getText(int positionInKeyOffset) {
return DictionaryUtils.getTextInternal(positionInKeyOffset, this.keyOffsets, this.byteArray);
}
@Override
public int writeTo(OutputStream out, int position) throws IOException {
return DictionaryUtils.writeToTextInternal(out, position, this.keyOffsets,
this.byteArray);
}
/**
* Get the size of the character data in the table.
* @return the bytes used by the table
*/
public int getCharacterSize() {
return byteArray.size();
}
/**
* Calculate the approximate size in memory.
* @return the number of bytes used in storing the tree.
*/
@Override
public long getSizeInBytes() {
return byteArray.getSizeInBytes() + keyOffsets.getSizeInBytes() +
super.getSizeInBytes();
}
}
| 4,169 | 27.758621 | 97 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/StripeStatisticsImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import java.util.ArrayList;
import java.util.List;
public class StripeStatisticsImpl extends StripeStatistics {
public StripeStatisticsImpl(TypeDescription schema,
List<OrcProto.ColumnStatistics> list,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
super(schema, list, writerUsedProlepticGregorian, convertToProlepticGregorian);
}
public StripeStatisticsImpl(TypeDescription schema,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
super(schema, createList(schema), writerUsedProlepticGregorian,
convertToProlepticGregorian);
}
/**
* Create a list that will be filled in later.
* @param schema the schema for this stripe statistics
* @return a new list of nulls for each column
*/
private static List<OrcProto.ColumnStatistics> createList(TypeDescription schema) {
int len = schema.getMaximumId() - schema.getId() + 1;
List<OrcProto.ColumnStatistics> result = new ArrayList<>(len);
for(int c=0; c < len; ++c) {
result.add(null);
}
return result;
}
public void updateColumn(int column, OrcProto.ColumnStatistics elem) {
cs.set(column, elem);
}
}
| 2,283 | 36.442623 | 85 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/TreeReaderFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
import org.apache.hadoop.hive.ql.io.filter.FilterContext;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.reader.ReaderEncryption;
import org.apache.orc.impl.reader.StripePlanner;
import org.apache.orc.impl.reader.tree.BatchReader;
import org.apache.orc.impl.reader.tree.PrimitiveBatchReader;
import org.apache.orc.impl.reader.tree.StructBatchReader;
import org.apache.orc.impl.reader.tree.TypeReader;
import org.apache.orc.impl.writer.TimestampTreeWriter;
import org.jetbrains.annotations.NotNull;
import java.io.EOFException;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.function.Consumer;
/**
* Factory for creating ORC tree readers.
*/
public class TreeReaderFactory {
public interface Context {
SchemaEvolution getSchemaEvolution();
Set<Integer> getColumnFilterIds();
Consumer<OrcFilterContext> getColumnFilterCallback();
boolean isSkipCorrupt();
boolean getUseUTCTimestamp();
String getWriterTimezone();
OrcFile.Version getFileFormat();
ReaderEncryption getEncryption();
boolean useProlepticGregorian();
boolean fileUsedProlepticGregorian();
TypeReader.ReaderCategory getReaderCategory(int columnId);
}
public static class ReaderContext implements Context {
private SchemaEvolution evolution;
private boolean skipCorrupt = false;
private boolean useUTCTimestamp = false;
private String writerTimezone;
private OrcFile.Version fileFormat;
private ReaderEncryption encryption;
private boolean useProlepticGregorian;
private boolean fileUsedProlepticGregorian;
private Set<Integer> filterColumnIds = Collections.emptySet();
Consumer<OrcFilterContext> filterCallback;
public ReaderContext setSchemaEvolution(SchemaEvolution evolution) {
this.evolution = evolution;
return this;
}
public ReaderContext setEncryption(ReaderEncryption value) {
encryption = value;
return this;
}
public ReaderContext setFilterCallback(
Set<Integer> filterColumnsList, Consumer<OrcFilterContext> filterCallback) {
this.filterColumnIds = filterColumnsList;
this.filterCallback = filterCallback;
return this;
}
public ReaderContext skipCorrupt(boolean skipCorrupt) {
this.skipCorrupt = skipCorrupt;
return this;
}
public ReaderContext useUTCTimestamp(boolean useUTCTimestamp) {
this.useUTCTimestamp = useUTCTimestamp;
return this;
}
public ReaderContext writerTimeZone(String writerTimezone) {
this.writerTimezone = writerTimezone;
return this;
}
public ReaderContext fileFormat(OrcFile.Version version) {
this.fileFormat = version;
return this;
}
public ReaderContext setProlepticGregorian(boolean file,
boolean reader) {
this.useProlepticGregorian = reader;
this.fileUsedProlepticGregorian = file;
return this;
}
@Override
public SchemaEvolution getSchemaEvolution() {
return evolution;
}
@Override
public Set<Integer> getColumnFilterIds() {
return filterColumnIds;
}
@Override
public Consumer<OrcFilterContext> getColumnFilterCallback() {
return filterCallback;
}
@Override
public boolean isSkipCorrupt() {
return skipCorrupt;
}
@Override
public boolean getUseUTCTimestamp() {
return useUTCTimestamp;
}
@Override
public String getWriterTimezone() {
return writerTimezone;
}
@Override
public OrcFile.Version getFileFormat() {
return fileFormat;
}
@Override
public ReaderEncryption getEncryption() {
return encryption;
}
@Override
public boolean useProlepticGregorian() {
return useProlepticGregorian;
}
@Override
public boolean fileUsedProlepticGregorian() {
return fileUsedProlepticGregorian;
}
@Override
public TypeReader.ReaderCategory getReaderCategory(int columnId) {
TypeReader.ReaderCategory result;
if (getColumnFilterIds().contains(columnId)) {
        // Filter columns with children are classified as FILTER_PARENT: they may include
        // non-filter children and are used when repositioning for the non-filter read.
        // Only struct and union readers are currently supported as parents; leaf filter
        // columns become FILTER_CHILD.
TypeDescription col = columnId == -1 ? null : getSchemaEvolution()
.getFileSchema()
.findSubtype(columnId);
if (col == null || col.getChildren() == null || col.getChildren().isEmpty()) {
result = TypeReader.ReaderCategory.FILTER_CHILD;
} else {
result = TypeReader.ReaderCategory.FILTER_PARENT;
}
} else {
result = TypeReader.ReaderCategory.NON_FILTER;
}
return result;
}
}
public abstract static class TreeReader implements TypeReader {
protected final int columnId;
protected BitFieldReader present = null;
protected final Context context;
protected final ReaderCategory readerCategory;
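    // Powers of ten from 10^0 through 10^18; 10^18 is the largest power of ten that fits in a long.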
static final long[] powerOfTenTable = {
1L, // 0
10L,
100L,
1_000L,
10_000L,
100_000L,
1_000_000L,
10_000_000L,
100_000_000L, // 8
1_000_000_000L,
10_000_000_000L,
100_000_000_000L,
1_000_000_000_000L,
10_000_000_000_000L,
100_000_000_000_000L,
1_000_000_000_000_000L,
10_000_000_000_000_000L, // 16
100_000_000_000_000_000L,
1_000_000_000_000_000_000L, // 18
};
TreeReader(int columnId, Context context) throws IOException {
this(columnId, null, context);
}
protected TreeReader(int columnId, InStream in, @NotNull Context context) throws IOException {
this.columnId = columnId;
this.context = context;
if (in == null) {
present = null;
} else {
present = new BitFieldReader(in);
}
this.readerCategory = context.getReaderCategory(columnId);
}
@Override
public ReaderCategory getReaderCategory() {
return readerCategory;
}
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if (encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
protected static IntegerReader createIntegerReader(OrcProto.ColumnEncoding.Kind kind,
InStream in,
boolean signed,
Context context) throws IOException {
switch (kind) {
case DIRECT_V2:
case DICTIONARY_V2:
return new RunLengthIntegerReaderV2(in, signed,
context != null && context.isSkipCorrupt());
case DIRECT:
case DICTIONARY:
return new RunLengthIntegerReader(in, signed);
default:
throw new IllegalArgumentException("Unknown encoding " + kind);
}
}
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
checkEncoding(planner.getEncoding(columnId));
InStream in = planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.PRESENT));
if (in == null) {
present = null;
} else {
present = new BitFieldReader(in);
}
}
/**
* Seek to the given position.
*
* @param index the indexes loaded from the file
* @param readPhase the current readPhase
* @throws IOException
*/
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
if (present != null) {
present.seek(index);
}
}
protected static int countNonNullRowsInRange(boolean[] isNull, int start, int end) {
int result = 0;
while (start < end) {
if (!isNull[start++]) {
result++;
}
}
return result;
}
protected long countNonNulls(long rows) throws IOException {
if (present != null) {
long result = 0;
for (long c = 0; c < rows; ++c) {
if (present.next() == 1) {
result += 1;
}
}
return result;
} else {
return rows;
}
}
/**
* Populates the isNull vector array in the previousVector object based on
* the present stream values. This function is called from all the child
* readers, and they all set the values based on isNull field value.
*
* @param previous The columnVector object whose isNull value is populated
     * @param isNull Whether each value was null at a higher level. If
* isNull is null, all values are non-null.
* @param batchSize Size of the column vector
* @param filterContext the information about the rows that were selected
* by the filter.
* @param readPhase The read level
* @throws IOException
*/
public void nextVector(ColumnVector previous,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
if (present != null || isNull != null) {
// Set noNulls and isNull vector of the ColumnVector based on
// present stream
previous.noNulls = true;
boolean allNull = true;
for (int i = 0; i < batchSize; i++) {
if (isNull == null || !isNull[i]) {
if (present != null && present.next() != 1) {
previous.noNulls = false;
previous.isNull[i] = true;
} else {
previous.isNull[i] = false;
allNull = false;
}
} else {
previous.noNulls = false;
previous.isNull[i] = true;
}
}
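        // The batch is only marked repeating when every value in it turned out to be null.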
previous.isRepeating = !previous.noNulls && allNull;
} else {
// There is no present stream, this means that all the values are
// present.
previous.noNulls = true;
for (int i = 0; i < batchSize; i++) {
previous.isNull[i] = false;
}
}
}
public BitFieldReader getPresent() {
return present;
}
@Override
public int getColumnId() {
return columnId;
}
}
public static class NullTreeReader extends TreeReader {
public NullTreeReader(int columnId, Context context) throws IOException {
super(columnId, context);
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) {
// PASS
}
@Override
public void skipRows(long rows, ReadPhase readPhase) {
// PASS
}
@Override
public void seek(PositionProvider position, ReadPhase readPhase) {
// PASS
}
@Override
public void seek(PositionProvider[] position,
ReadPhase readPhase) {
// PASS
}
@Override
public void nextVector(ColumnVector vector, boolean[] isNull, int size,
FilterContext filterContext, ReadPhase readPhase) {
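      // A null column carries no data; mark the whole batch as a single repeating null.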
vector.noNulls = false;
vector.isNull[0] = true;
vector.isRepeating = true;
}
}
public static class BooleanTreeReader extends TreeReader {
protected BitFieldReader reader = null;
BooleanTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, context);
}
protected BooleanTreeReader(int columnId,
InStream present,
InStream data,
Context context) throws IOException {
super(columnId, present, context);
if (data != null) {
reader = new BitFieldReader(data);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
reader = new BitFieldReader(planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)));
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
LongColumnVector result = (LongColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (filterContext.isSelectedInUse()) {
reader.nextVector(result, filterContext, batchSize);
} else {
// Read value entries based on isNull entries
reader.nextVector(result, batchSize);
}
}
}
public static class ByteTreeReader extends TreeReader {
protected RunLengthByteReader reader = null;
ByteTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, context);
}
protected ByteTreeReader(
int columnId, InStream present, InStream data, Context context) throws IOException {
super(columnId, present, context);
this.reader = new RunLengthByteReader(data);
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
reader = new RunLengthByteReader(planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)));
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final LongColumnVector result = (LongColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
// Read value entries based on isNull entries
reader.nextVector(result, result.vector, batchSize);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
}
public static class ShortTreeReader extends TreeReader {
protected IntegerReader reader = null;
ShortTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, context);
}
protected ShortTreeReader(int columnId, InStream present, InStream data,
OrcProto.ColumnEncoding encoding, Context context)
throws IOException {
super(columnId, present, context);
if (data != null && encoding != null) {
checkEncoding(encoding);
this.reader = createIntegerReader(encoding.getKind(), data, true, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
reader = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(name), true, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final LongColumnVector result = (LongColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
// Read value entries based on isNull entries
reader.nextVector(result, result.vector, batchSize);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
}
public static class IntTreeReader extends TreeReader {
protected IntegerReader reader = null;
IntTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, context);
}
protected IntTreeReader(int columnId, InStream present, InStream data,
OrcProto.ColumnEncoding encoding, Context context)
throws IOException {
super(columnId, present, context);
if (data != null && encoding != null) {
checkEncoding(encoding);
this.reader = createIntegerReader(encoding.getKind(), data, true, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
reader = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(name), true, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final LongColumnVector result = (LongColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
// Read value entries based on isNull entries
reader.nextVector(result, result.vector, batchSize);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
}
public static class LongTreeReader extends TreeReader {
protected IntegerReader reader = null;
LongTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, context);
}
protected LongTreeReader(int columnId, InStream present, InStream data,
OrcProto.ColumnEncoding encoding,
Context context)
throws IOException {
super(columnId, present, context);
if (data != null && encoding != null) {
checkEncoding(encoding);
this.reader = createIntegerReader(encoding.getKind(), data, true, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
reader = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(name), true, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final LongColumnVector result = (LongColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
// Read value entries based on isNull entries
reader.nextVector(result, result.vector, batchSize);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
}
public static class FloatTreeReader extends TreeReader {
protected InStream stream;
private final SerializationUtils utils;
FloatTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, context);
}
protected FloatTreeReader(int columnId,
InStream present,
InStream data,
Context context) throws IOException {
super(columnId, present, context);
this.utils = new SerializationUtils();
this.stream = data;
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = planner.getStream(name);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
stream.seek(index);
}
private void nextVector(DoubleColumnVector result,
boolean[] isNull,
final int batchSize) throws IOException {
final boolean hasNulls = !result.noNulls;
boolean allNulls = hasNulls;
if (batchSize > 0) {
if (hasNulls) {
// conditions to ensure bounds checks skips
for (int i = 0; batchSize <= result.isNull.length && i < batchSize; i++) {
allNulls = allNulls & result.isNull[i];
}
if (allNulls) {
result.vector[0] = Double.NaN;
result.isRepeating = true;
} else {
// some nulls
result.isRepeating = false;
// conditions to ensure bounds checks skips
for (int i = 0; batchSize <= result.isNull.length &&
batchSize <= result.vector.length && i < batchSize; i++) {
if (!result.isNull[i]) {
result.vector[i] = utils.readFloat(stream);
} else {
// If the value is not present then set NaN
result.vector[i] = Double.NaN;
}
}
}
} else {
// no nulls & > 1 row (check repeating)
boolean repeating = (batchSize > 1);
final float f1 = utils.readFloat(stream);
result.vector[0] = f1;
// conditions to ensure bounds checks skips
for (int i = 1; i < batchSize && batchSize <= result.vector.length; i++) {
final float f2 = utils.readFloat(stream);
repeating = repeating && (f1 == f2);
result.vector[i] = f2;
}
result.isRepeating = repeating;
}
}
}
private void nextVector(DoubleColumnVector result,
boolean[] isNull,
FilterContext filterContext,
final int batchSize) throws IOException {
final boolean hasNulls = !result.noNulls;
boolean allNulls = hasNulls;
result.isRepeating = false;
int previousIdx = 0;
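      // previousIdx tracks the next unprocessed row so that values between selected rows
      // can be skipped in the stream.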
if (batchSize > 0) {
if (hasNulls) {
// conditions to ensure bounds checks skips
for (int i = 0; batchSize <= result.isNull.length && i < batchSize; i++) {
allNulls = allNulls & result.isNull[i];
}
if (allNulls) {
result.vector[0] = Double.NaN;
result.isRepeating = true;
} else {
// some nulls
// conditions to ensure bounds checks skips
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
utils.skipFloat(stream, countNonNullRowsInRange(result.isNull, previousIdx, idx));
}
if (!result.isNull[idx]) {
result.vector[idx] = utils.readFloat(stream);
} else {
// If the value is not present then set NaN
result.vector[idx] = Double.NaN;
}
previousIdx = idx + 1;
}
utils.skipFloat(stream, countNonNullRowsInRange(result.isNull, previousIdx, batchSize));
}
} else {
// Read only the selected row indexes and skip the rest
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
utils.skipFloat(stream,idx - previousIdx);
}
result.vector[idx] = utils.readFloat(stream);
previousIdx = idx + 1;
}
utils.skipFloat(stream,batchSize - previousIdx);
}
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final DoubleColumnVector result = (DoubleColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (filterContext.isSelectedInUse()) {
nextVector(result, isNull, filterContext, batchSize);
} else {
nextVector(result, isNull, batchSize);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
for (int i = 0; i < items; ++i) {
utils.readFloat(stream);
}
}
}
public static class DoubleTreeReader extends TreeReader {
protected InStream stream;
private final SerializationUtils utils;
DoubleTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, context);
}
protected DoubleTreeReader(int columnId,
InStream present,
InStream data,
Context context) throws IOException {
super(columnId, present, context);
this.utils = new SerializationUtils();
this.stream = data;
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name =
new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = planner.getStream(name);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
stream.seek(index);
}
private void nextVector(DoubleColumnVector result,
boolean[] isNull,
FilterContext filterContext,
final int batchSize) throws IOException {
final boolean hasNulls = !result.noNulls;
boolean allNulls = hasNulls;
result.isRepeating = false;
if (batchSize != 0) {
if (hasNulls) {
// conditions to ensure bounds checks skips
for (int i = 0; i < batchSize && batchSize <= result.isNull.length; i++) {
allNulls = allNulls & result.isNull[i];
}
if (allNulls) {
result.vector[0] = Double.NaN;
result.isRepeating = true;
} else {
// some nulls
int previousIdx = 0;
// conditions to ensure bounds checks skips
for (int i = 0; batchSize <= result.isNull.length &&
i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
utils.skipDouble(stream, countNonNullRowsInRange(result.isNull, previousIdx, idx));
}
if (!result.isNull[idx]) {
result.vector[idx] = utils.readDouble(stream);
} else {
// If the value is not present then set NaN
result.vector[idx] = Double.NaN;
}
previousIdx = idx + 1;
}
utils.skipDouble(stream,
countNonNullRowsInRange(result.isNull, previousIdx, batchSize));
}
} else {
// no nulls
int previousIdx = 0;
// Read only the selected row indexes and skip the rest
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
utils.skipDouble(stream, idx - previousIdx);
}
result.vector[idx] = utils.readDouble(stream);
previousIdx = idx + 1;
}
utils.skipDouble(stream, batchSize - previousIdx);
}
}
}
private void nextVector(DoubleColumnVector result,
boolean[] isNull,
final int batchSize) throws IOException {
final boolean hasNulls = !result.noNulls;
boolean allNulls = hasNulls;
if (batchSize != 0) {
if (hasNulls) {
// conditions to ensure bounds checks skips
for (int i = 0; i < batchSize && batchSize <= result.isNull.length; i++) {
allNulls = allNulls & result.isNull[i];
}
if (allNulls) {
result.vector[0] = Double.NaN;
result.isRepeating = true;
} else {
// some nulls
result.isRepeating = false;
// conditions to ensure bounds checks skips
for (int i = 0; batchSize <= result.isNull.length &&
batchSize <= result.vector.length && i < batchSize; i++) {
if (!result.isNull[i]) {
result.vector[i] = utils.readDouble(stream);
} else {
// If the value is not present then set NaN
result.vector[i] = Double.NaN;
}
}
}
} else {
// no nulls
boolean repeating = (batchSize > 1);
final double d1 = utils.readDouble(stream);
result.vector[0] = d1;
// conditions to ensure bounds checks skips
for (int i = 1; i < batchSize && batchSize <= result.vector.length; i++) {
final double d2 = utils.readDouble(stream);
repeating = repeating && (d1 == d2);
result.vector[i] = d2;
}
result.isRepeating = repeating;
}
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final DoubleColumnVector result = (DoubleColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (filterContext.isSelectedInUse()) {
nextVector(result, isNull, filterContext, batchSize);
} else {
nextVector(result, isNull, batchSize);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
long len = items * 8;
while (len > 0) {
len -= stream.skip(len);
}
}
}
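  /**
   * A tree reader for BINARY columns. The concatenated byte values come from
   * the DATA stream and the per-row byte counts from the integer LENGTH stream.
   */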
public static class BinaryTreeReader extends TreeReader {
protected InStream stream;
protected IntegerReader lengths = null;
protected final LongColumnVector scratchlcv;
BinaryTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, null, context);
}
protected BinaryTreeReader(int columnId, InStream present, InStream data, InStream length,
OrcProto.ColumnEncoding encoding, Context context) throws IOException {
super(columnId, present, context);
scratchlcv = new LongColumnVector();
this.stream = data;
if (length != null && encoding != null) {
checkEncoding(encoding);
this.lengths = createIntegerReader(encoding.getKind(), length, false, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = planner.getStream(name);
lengths = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(new StreamName(columnId, OrcProto.Stream.Kind.LENGTH)), false, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
stream.seek(index);
lengths.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final BytesColumnVector result = (BytesColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
scratchlcv.ensureSize(batchSize, false);
BytesColumnVectorUtil.readOrcByteArrays(stream, lengths, scratchlcv, result, batchSize);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
long lengthToSkip = 0;
for (int i = 0; i < items; ++i) {
lengthToSkip += lengths.next();
}
while (lengthToSkip > 0) {
lengthToSkip -= stream.skip(lengthToSkip);
}
}
}
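  /**
   * A tree reader for timestamp columns (both the local and instant variants).
   * Seconds relative to the writer's base timestamp are read from the DATA
   * stream and the encoded nanoseconds from the SECONDARY stream; when the
   * writer's and reader's time zone rules differ, non-instant values are
   * shifted between the two zones.
   */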
public static class TimestampTreeReader extends TreeReader {
protected IntegerReader data = null;
protected IntegerReader nanos = null;
private final Map<String, Long> baseTimestampMap;
protected long base_timestamp;
private final TimeZone readerTimeZone;
private final boolean instantType;
private TimeZone writerTimeZone;
private boolean hasSameTZRules;
private final ThreadLocal<DateFormat> threadLocalDateFormat;
private final boolean useProleptic;
private final boolean fileUsesProleptic;
TimestampTreeReader(int columnId, Context context,
boolean instantType) throws IOException {
this(columnId, null, null, null, null, context, instantType);
}
protected TimestampTreeReader(int columnId, InStream presentStream, InStream dataStream,
InStream nanosStream,
OrcProto.ColumnEncoding encoding,
Context context,
boolean instantType) throws IOException {
super(columnId, presentStream, context);
this.instantType = instantType;
this.threadLocalDateFormat = new ThreadLocal<>();
this.threadLocalDateFormat.set(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
this.baseTimestampMap = new HashMap<>();
if (instantType || context.getUseUTCTimestamp()) {
this.readerTimeZone = TimeZone.getTimeZone("UTC");
} else {
this.readerTimeZone = TimeZone.getDefault();
}
if (context.getWriterTimezone() == null || context.getWriterTimezone().isEmpty()) {
if (instantType) {
this.base_timestamp = getBaseTimestamp(readerTimeZone.getID()); // UTC
} else {
this.base_timestamp = getBaseTimestamp(TimeZone.getDefault().getID());
}
} else {
this.base_timestamp = getBaseTimestamp(context.getWriterTimezone());
}
if (encoding != null) {
checkEncoding(encoding);
if (dataStream != null) {
this.data = createIntegerReader(encoding.getKind(), dataStream, true, context);
}
if (nanosStream != null) {
this.nanos = createIntegerReader(encoding.getKind(), nanosStream, false, context);
}
}
fileUsesProleptic = context.fileUsedProlepticGregorian();
useProleptic = context.useProlepticGregorian();
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
OrcProto.ColumnEncoding.Kind kind = planner.getEncoding(columnId).getKind();
data = createIntegerReader(kind,
planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.DATA)), true, context);
nanos = createIntegerReader(kind,
planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.SECONDARY)), false, context);
if (!instantType) {
base_timestamp = getBaseTimestamp(planner.getWriterTimezone());
}
}
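    /**
     * Look up, and cache per zone id, the epoch seconds of the writer's base
     * timestamp string parsed in that writer time zone. Also refreshes
     * writerTimeZone and hasSameTZRules whenever the zone changes between stripes.
     */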
protected long getBaseTimestamp(String timeZoneId) throws IOException {
// to make sure new readers read old files in the same way
if (timeZoneId == null || timeZoneId.isEmpty()) {
timeZoneId = writerTimeZone.getID();
}
if (writerTimeZone == null || !timeZoneId.equals(writerTimeZone.getID())) {
writerTimeZone = TimeZone.getTimeZone(timeZoneId);
hasSameTZRules = writerTimeZone.hasSameRules(readerTimeZone);
if (!baseTimestampMap.containsKey(timeZoneId)) {
threadLocalDateFormat.get().setTimeZone(writerTimeZone);
try {
long epoch = threadLocalDateFormat.get()
.parse(TimestampTreeWriter.BASE_TIMESTAMP_STRING).getTime() /
TimestampTreeWriter.MILLIS_PER_SECOND;
baseTimestampMap.put(timeZoneId, epoch);
return epoch;
} catch (ParseException e) {
throw new IOException("Unable to create base timestamp", e);
} finally {
threadLocalDateFormat.get().setTimeZone(readerTimeZone);
}
} else {
return baseTimestampMap.get(timeZoneId);
}
}
return base_timestamp;
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
data.seek(index);
nanos.seek(index);
}
public void readTimestamp(TimestampColumnVector result, int idx) throws IOException {
final int newNanos = parseNanos(nanos.next());
long millis = (data.next() + base_timestamp)
* TimestampTreeWriter.MILLIS_PER_SECOND + newNanos / 1_000_000;
if (millis < 0 && newNanos > 999_999) {
millis -= TimestampTreeWriter.MILLIS_PER_SECOND;
}
long offset = 0;
// If reader and writer time zones have different rules, adjust the timezone difference
// between reader and writer taking day light savings into account.
if (!hasSameTZRules) {
offset = SerializationUtils.convertBetweenTimezones(writerTimeZone,
readerTimeZone, millis);
}
result.time[idx] = millis + offset;
result.nanos[idx] = newNanos;
}
public void nextVector(TimestampColumnVector result,
boolean[] isNull,
final int batchSize) throws IOException {
for (int i = 0; i < batchSize; i++) {
if (result.noNulls || !result.isNull[i]) {
readTimestamp(result, i);
if (result.isRepeating && i != 0 &&
(result.time[0] != result.time[i] ||
result.nanos[0] != result.nanos[i])) {
result.isRepeating = false;
}
}
}
result.changeCalendar(useProleptic, true);
}
public void nextVector(TimestampColumnVector result,
boolean[] isNull,
FilterContext filterContext,
final int batchSize) throws IOException {
result.isRepeating = false;
int previousIdx = 0;
if (result.noNulls) {
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
skipStreamRows(idx - previousIdx);
}
readTimestamp(result, idx);
previousIdx = idx + 1;
}
skipStreamRows(batchSize - previousIdx);
} else {
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
skipStreamRows(countNonNullRowsInRange(result.isNull, previousIdx, idx));
}
if (!result.isNull[idx]) {
readTimestamp(result, idx);
}
previousIdx = idx + 1;
}
skipStreamRows(countNonNullRowsInRange(result.isNull, previousIdx, batchSize));
}
result.changeCalendar(useProleptic, true);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
TimestampColumnVector result = (TimestampColumnVector) previousVector;
result.changeCalendar(fileUsesProleptic, false);
super.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
result.setIsUTC(context.getUseUTCTimestamp());
if (filterContext.isSelectedInUse()) {
nextVector(result, isNull, filterContext, batchSize);
} else {
nextVector(result, isNull, batchSize);
}
}
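    // The encoded nanoseconds keep the significant digits in the upper bits and a
    // 3-bit count in the lowest bits: a non-zero count z means the digits were
    // scaled down by 10^(z + 1), so decoding multiplies them back up. For example,
    // 123456 << 3 | 2 decodes to 123456 * 10^3 = 123_456_000 nanoseconds.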
private static int parseNanos(long serialized) {
int zeros = 7 & (int) serialized;
int result = (int) (serialized >>> 3);
if (zeros != 0) {
result *= (int) powerOfTenTable[zeros + 1];
}
return result;
}
void skipStreamRows(long items) throws IOException {
data.skip(items);
nanos.skip(items);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
data.skip(items);
nanos.skip(items);
}
}
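  /**
   * A tree reader for DATE columns, stored as days since the epoch in an integer
   * DATA stream. If either the file or the reader uses the proleptic Gregorian
   * calendar, the destination must be a DateColumnVector so the values can be
   * converted between calendars.
   */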
public static class DateTreeReader extends TreeReader {
protected IntegerReader reader = null;
private final boolean needsDateColumnVector;
private final boolean useProleptic;
private final boolean fileUsesProleptic;
DateTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, context);
}
protected DateTreeReader(int columnId, InStream present, InStream data,
OrcProto.ColumnEncoding encoding, Context context) throws IOException {
super(columnId, present, context);
useProleptic = context.useProlepticGregorian();
fileUsesProleptic = context.fileUsedProlepticGregorian();
// if either side is proleptic, we need a DateColumnVector
needsDateColumnVector = useProleptic || fileUsesProleptic;
if (data != null && encoding != null) {
checkEncoding(encoding);
reader = createIntegerReader(encoding.getKind(), data, true, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
reader = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(name), true, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final LongColumnVector result = (LongColumnVector) previousVector;
if (needsDateColumnVector) {
if (result instanceof DateColumnVector) {
((DateColumnVector) result).changeCalendar(fileUsesProleptic, false);
} else {
throw new IllegalArgumentException("Can't use LongColumnVector to " +
"read proleptic Gregorian dates.");
}
}
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
// Read value entries based on isNull entries
reader.nextVector(result, result.vector, batchSize);
if (needsDateColumnVector) {
((DateColumnVector) result).changeCalendar(useProleptic, true);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
}
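  /**
   * A tree reader for DECIMAL columns written with the unbounded encoding: each
   * value is a variable-length signed big integer in the DATA stream and its
   * scale comes from the SECONDARY stream.
   */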
public static class DecimalTreeReader extends TreeReader {
protected final int precision;
protected final int scale;
protected InStream valueStream;
protected IntegerReader scaleReader = null;
private int[] scratchScaleVector;
private final byte[] scratchBytes;
DecimalTreeReader(int columnId,
int precision,
int scale,
Context context) throws IOException {
this(columnId, null, null, null, null, precision, scale, context);
}
protected DecimalTreeReader(int columnId,
InStream present,
InStream valueStream,
InStream scaleStream,
OrcProto.ColumnEncoding encoding,
int precision,
int scale,
Context context) throws IOException {
super(columnId, present, context);
this.precision = precision;
this.scale = scale;
this.scratchScaleVector = new int[VectorizedRowBatch.DEFAULT_SIZE];
this.valueStream = valueStream;
this.scratchBytes = new byte[HiveDecimal.SCRATCH_BUFFER_LEN_SERIALIZATION_UTILS_READ];
if (scaleStream != null && encoding != null) {
checkEncoding(encoding);
this.scaleReader = createIntegerReader(encoding.getKind(), scaleStream, true, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
valueStream = planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.DATA));
scaleReader = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(new StreamName(columnId, OrcProto.Stream.Kind.SECONDARY)),
true, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
valueStream.seek(index);
scaleReader.seek(index);
}
private void nextVector(DecimalColumnVector result,
boolean[] isNull,
final int batchSize) throws IOException {
if (batchSize > scratchScaleVector.length) {
scratchScaleVector = new int[(int) batchSize];
}
// read the scales
scaleReader.nextVector(result, scratchScaleVector, batchSize);
// Read value entries based on isNull entries
// Use the fast ORC deserialization method that emulates SerializationUtils.readBigInteger
// provided by HiveDecimalWritable.
HiveDecimalWritable[] vector = result.vector;
HiveDecimalWritable decWritable;
if (result.noNulls) {
result.isRepeating = true;
for (int r = 0; r < batchSize; ++r) {
decWritable = vector[r];
if (!decWritable.serializationUtilsRead(
valueStream, scratchScaleVector[r],
scratchBytes)) {
result.isNull[r] = true;
result.noNulls = false;
}
setIsRepeatingIfNeeded(result, r);
}
} else if (!result.isRepeating || !result.isNull[0]) {
result.isRepeating = true;
for (int r = 0; r < batchSize; ++r) {
if (!result.isNull[r]) {
decWritable = vector[r];
if (!decWritable.serializationUtilsRead(
valueStream, scratchScaleVector[r],
scratchBytes)) {
result.isNull[r] = true;
result.noNulls = false;
}
}
setIsRepeatingIfNeeded(result, r);
}
}
}
private void nextVector(DecimalColumnVector result,
boolean[] isNull,
FilterContext filterContext,
final int batchSize) throws IOException {
// Allocate space for the whole array
if (batchSize > scratchScaleVector.length) {
scratchScaleVector = new int[(int) batchSize];
}
      // But read only the scales that are needed
scaleReader.nextVector(result, scratchScaleVector, batchSize);
// Read value entries based on isNull entries
// Use the fast ORC deserialization method that emulates SerializationUtils.readBigInteger
// provided by HiveDecimalWritable.
HiveDecimalWritable[] vector = result.vector;
HiveDecimalWritable decWritable;
if (result.noNulls) {
result.isRepeating = true;
int previousIdx = 0;
for (int r = 0; r != filterContext.getSelectedSize(); ++r) {
int idx = filterContext.getSelected()[r];
if (idx - previousIdx > 0) {
skipStreamRows(idx - previousIdx);
}
decWritable = vector[idx];
if (!decWritable.serializationUtilsRead(
valueStream, scratchScaleVector[idx],
scratchBytes)) {
result.isNull[idx] = true;
result.noNulls = false;
}
setIsRepeatingIfNeeded(result, idx);
previousIdx = idx + 1;
}
skipStreamRows(batchSize - previousIdx);
} else if (!result.isRepeating || !result.isNull[0]) {
result.isRepeating = true;
int previousIdx = 0;
for (int r = 0; r != filterContext.getSelectedSize(); ++r) {
int idx = filterContext.getSelected()[r];
if (idx - previousIdx > 0) {
skipStreamRows(countNonNullRowsInRange(result.isNull, previousIdx, idx));
}
if (!result.isNull[idx]) {
decWritable = vector[idx];
if (!decWritable.serializationUtilsRead(
valueStream, scratchScaleVector[idx],
scratchBytes)) {
result.isNull[idx] = true;
result.noNulls = false;
}
}
setIsRepeatingIfNeeded(result, idx);
previousIdx = idx + 1;
}
skipStreamRows(countNonNullRowsInRange(result.isNull, previousIdx, batchSize));
}
}
private void nextVector(Decimal64ColumnVector result,
boolean[] isNull,
final int batchSize) throws IOException {
if (precision > TypeDescription.MAX_DECIMAL64_PRECISION) {
throw new IllegalArgumentException("Reading large precision type into" +
" Decimal64ColumnVector.");
}
if (batchSize > scratchScaleVector.length) {
scratchScaleVector = new int[(int) batchSize];
}
// read the scales
scaleReader.nextVector(result, scratchScaleVector, batchSize);
if (result.noNulls) {
result.isRepeating = true;
for (int r = 0; r < batchSize; ++r) {
final long scaleFactor = powerOfTenTable[scale - scratchScaleVector[r]];
result.vector[r] = SerializationUtils.readVslong(valueStream) * scaleFactor;
setIsRepeatingIfNeeded(result, r);
}
} else if (!result.isRepeating || !result.isNull[0]) {
result.isRepeating = true;
for (int r = 0; r < batchSize; ++r) {
if (!result.isNull[r]) {
final long scaleFactor = powerOfTenTable[scale - scratchScaleVector[r]];
result.vector[r] = SerializationUtils.readVslong(valueStream) * scaleFactor;
}
setIsRepeatingIfNeeded(result, r);
}
}
result.precision = (short) precision;
result.scale = (short) scale;
}
private void nextVector(Decimal64ColumnVector result,
boolean[] isNull,
FilterContext filterContext,
final int batchSize) throws IOException {
if (precision > TypeDescription.MAX_DECIMAL64_PRECISION) {
throw new IllegalArgumentException("Reading large precision type into" +
" Decimal64ColumnVector.");
}
// Allocate space for the whole array
if (batchSize > scratchScaleVector.length) {
scratchScaleVector = new int[(int) batchSize];
}
// Read all the scales
scaleReader.nextVector(result, scratchScaleVector, batchSize);
if (result.noNulls) {
result.isRepeating = true;
int previousIdx = 0;
for (int r = 0; r != filterContext.getSelectedSize(); r++) {
int idx = filterContext.getSelected()[r];
if (idx - previousIdx > 0) {
skipStreamRows(idx - previousIdx);
}
result.vector[idx] = SerializationUtils.readVslong(valueStream);
          for (int s = scratchScaleVector[idx]; s < scale; ++s) {
result.vector[idx] *= 10;
}
setIsRepeatingIfNeeded(result, idx);
previousIdx = idx + 1;
}
skipStreamRows(batchSize - previousIdx);
} else if (!result.isRepeating || !result.isNull[0]) {
result.isRepeating = true;
int previousIdx = 0;
for (int r = 0; r != filterContext.getSelectedSize(); r++) {
int idx = filterContext.getSelected()[r];
if (idx - previousIdx > 0) {
skipStreamRows(countNonNullRowsInRange(result.isNull, previousIdx, idx));
}
if (!result.isNull[idx]) {
result.vector[idx] = SerializationUtils.readVslong(valueStream);
            for (int s = scratchScaleVector[idx]; s < scale; ++s) {
result.vector[idx] *= 10;
}
}
setIsRepeatingIfNeeded(result, idx);
previousIdx = idx + 1;
}
skipStreamRows(countNonNullRowsInRange(result.isNull, previousIdx, batchSize));
}
result.precision = (short) precision;
result.scale = (short) scale;
}
private void setIsRepeatingIfNeeded(Decimal64ColumnVector result, int index) {
if (result.isRepeating && index > 0 && (result.vector[0] != result.vector[index] ||
result.isNull[0] != result.isNull[index])) {
result.isRepeating = false;
}
}
private void setIsRepeatingIfNeeded(DecimalColumnVector result, int index) {
if (result.isRepeating && index > 0 && (!result.vector[0].equals(result.vector[index]) ||
result.isNull[0] != result.isNull[index])) {
result.isRepeating = false;
}
}
@Override
public void nextVector(ColumnVector result,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (result instanceof Decimal64ColumnVector) {
if (filterContext.isSelectedInUse()) {
nextVector((Decimal64ColumnVector) result, isNull, filterContext, batchSize);
} else {
nextVector((Decimal64ColumnVector) result, isNull, batchSize);
}
} else {
if (filterContext.isSelectedInUse()) {
nextVector((DecimalColumnVector) result, isNull, filterContext, batchSize);
} else {
nextVector((DecimalColumnVector) result, isNull, batchSize);
}
}
}
void skipStreamRows(long items) throws IOException {
for (int i = 0; i < items; i++) {
int input;
do {
input = valueStream.read();
if (input == -1) {
throw new EOFException("Reading BigInteger past EOF from " + valueStream);
}
        } while (input >= 128);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
HiveDecimalWritable scratchDecWritable = new HiveDecimalWritable();
for (int i = 0; i < items; i++) {
scratchDecWritable.serializationUtilsRead(valueStream, 0, scratchBytes);
}
scaleReader.skip(items);
}
}
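  /**
   * A tree reader for DECIMAL columns whose values fit in 64 bits: the unscaled
   * longs are read with RLE v2 from the DATA stream and the column's fixed scale
   * is applied while filling the vector.
   */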
public static class Decimal64TreeReader extends TreeReader {
protected final int precision;
protected final int scale;
protected final boolean skipCorrupt;
protected RunLengthIntegerReaderV2 valueReader;
Decimal64TreeReader(int columnId,
int precision,
int scale,
Context context) throws IOException {
this(columnId, null, null, null, precision, scale, context);
}
protected Decimal64TreeReader(int columnId,
InStream present,
InStream valueStream,
OrcProto.ColumnEncoding encoding,
int precision,
int scale,
Context context) throws IOException {
super(columnId, present, context);
this.precision = precision;
this.scale = scale;
valueReader = new RunLengthIntegerReaderV2(valueStream, true,
context.isSkipCorrupt());
skipCorrupt = context.isSkipCorrupt();
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if (encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT &&
encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
InStream stream = planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.DATA));
valueReader = new RunLengthIntegerReaderV2(stream, true, skipCorrupt);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
valueReader.seek(index);
}
private void nextVector(DecimalColumnVector result,
FilterContext filterContext,
final int batchSize) throws IOException {
if (result.noNulls) {
if (filterContext.isSelectedInUse()) {
result.isRepeating = true;
int previousIdx = 0;
for (int r = 0; r != filterContext.getSelectedSize(); ++r) {
int idx = filterContext.getSelected()[r];
if (idx - previousIdx > 0) {
valueReader.skip(idx - previousIdx);
}
result.vector[idx].setFromLongAndScale(valueReader.next(), scale);
setIsRepeatingIfNeeded(result, idx);
previousIdx = idx + 1;
}
valueReader.skip(batchSize - previousIdx);
} else {
result.isRepeating = true;
for (int r = 0; r < batchSize; ++r) {
result.vector[r].setFromLongAndScale(valueReader.next(), scale);
setIsRepeatingIfNeeded(result, r);
}
}
} else if (!result.isRepeating || !result.isNull[0]) {
if (filterContext.isSelectedInUse()) {
result.isRepeating = true;
int previousIdx = 0;
for (int r = 0; r != filterContext.getSelectedSize(); ++r) {
int idx = filterContext.getSelected()[r];
if (idx - previousIdx > 0) {
valueReader.skip(countNonNullRowsInRange(result.isNull, previousIdx, idx));
}
            if (!result.isNull[idx]) {
result.vector[idx].setFromLongAndScale(valueReader.next(), scale);
}
setIsRepeatingIfNeeded(result, idx);
previousIdx = idx + 1;
}
valueReader.skip(countNonNullRowsInRange(result.isNull, previousIdx, batchSize));
} else {
result.isRepeating = true;
for (int r = 0; r < batchSize; ++r) {
if (!result.isNull[r]) {
result.vector[r].setFromLongAndScale(valueReader.next(), scale);
}
setIsRepeatingIfNeeded(result, r);
}
}
}
result.precision = (short) precision;
result.scale = (short) scale;
}
private void nextVector(Decimal64ColumnVector result,
FilterContext filterContext,
final int batchSize) throws IOException {
valueReader.nextVector(result, result.vector, batchSize);
result.precision = (short) precision;
result.scale = (short) scale;
}
private void setIsRepeatingIfNeeded(DecimalColumnVector result, int index) {
if (result.isRepeating && index > 0 && (!result.vector[0].equals(result.vector[index]) ||
result.isNull[0] != result.isNull[index])) {
result.isRepeating = false;
}
}
@Override
public void nextVector(ColumnVector result,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (result instanceof Decimal64ColumnVector) {
nextVector((Decimal64ColumnVector) result, filterContext, batchSize);
} else {
nextVector((DecimalColumnVector) result, filterContext, batchSize);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
valueReader.skip(items);
}
}
/**
* A tree reader that will read string columns. At the start of the
* stripe, it creates an internal reader based on whether a direct or
* dictionary encoding was used.
*/
public static class StringTreeReader extends TreeReader {
protected TypeReader reader;
StringTreeReader(int columnId, Context context) throws IOException {
super(columnId, context);
}
protected StringTreeReader(int columnId, InStream present, InStream data, InStream length,
InStream dictionary, OrcProto.ColumnEncoding encoding, Context context) throws IOException {
super(columnId, present, context);
if (encoding != null) {
switch (encoding.getKind()) {
case DIRECT:
case DIRECT_V2:
reader = new StringDirectTreeReader(columnId, present, data, length,
encoding.getKind(), context);
break;
case DICTIONARY:
case DICTIONARY_V2:
reader = new StringDictionaryTreeReader(columnId, present, data, length, dictionary,
encoding, context);
break;
default:
throw new IllegalArgumentException("Unsupported encoding " +
encoding.getKind());
}
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
reader.checkEncoding(encoding);
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
// For each stripe, checks the encoding and initializes the appropriate
// reader
switch (planner.getEncoding(columnId).getKind()) {
case DIRECT:
case DIRECT_V2:
reader = new StringDirectTreeReader(columnId, context);
break;
case DICTIONARY:
case DICTIONARY_V2:
reader = new StringDictionaryTreeReader(columnId, context);
break;
default:
throw new IllegalArgumentException("Unsupported encoding " +
planner.getEncoding(columnId).getKind());
}
reader.startStripe(planner, readPhase);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
reader.seek(index, readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
reader.seek(index, readPhase);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
reader.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skipRows(items, readPhase);
}
}
// This class collects together very similar methods for reading an ORC vector of byte arrays and
// creating the BytesColumnVector.
//
public static class BytesColumnVectorUtil {
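    // Reads the LENGTH stream for the batch, totals the byte counts of the
    // non-null rows, and then reads that many bytes of the DATA stream into one
    // shared array that callers slice with BytesColumnVector.setRef.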
private static byte[] commonReadByteArrays(InStream stream, IntegerReader lengths,
LongColumnVector scratchlcv,
BytesColumnVector result, final int batchSize) throws IOException {
// Read lengths
scratchlcv.isRepeating = result.isRepeating;
scratchlcv.noNulls = result.noNulls;
scratchlcv.isNull = result.isNull; // Notice we are replacing the isNull vector here...
lengths.nextVector(scratchlcv, scratchlcv.vector, batchSize);
int totalLength = 0;
if (!scratchlcv.isRepeating) {
for (int i = 0; i < batchSize; i++) {
if (!scratchlcv.isNull[i]) {
totalLength += (int) scratchlcv.vector[i];
}
}
} else {
if (!scratchlcv.isNull[0]) {
totalLength = (int) (batchSize * scratchlcv.vector[0]);
}
}
if (totalLength < 0) {
StringBuilder sb = new StringBuilder("totalLength:" + totalLength
+ " is a negative number.");
if (batchSize > 1) {
sb.append(" The current batch size is ");
sb.append(batchSize);
sb.append(", you can reduce the value by '");
sb.append(OrcConf.ROW_BATCH_SIZE.getAttribute());
sb.append("'.");
}
throw new IOException(sb.toString());
}
// Read all the strings for this batch
byte[] allBytes = new byte[totalLength];
int offset = 0;
int len = totalLength;
while (len > 0) {
int bytesRead = stream.read(allBytes, offset, len);
if (bytesRead < 0) {
throw new EOFException("Can't finish byte read from " + stream);
}
len -= bytesRead;
offset += bytesRead;
}
return allBytes;
}
// This method has the common code for reading in bytes into a BytesColumnVector.
public static void readOrcByteArrays(InStream stream,
IntegerReader lengths,
LongColumnVector scratchlcv,
BytesColumnVector result,
final int batchSize) throws IOException {
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
byte[] allBytes =
commonReadByteArrays(stream, lengths, scratchlcv, result, batchSize);
// Too expensive to figure out 'repeating' by comparisons.
result.isRepeating = false;
int offset = 0;
if (!scratchlcv.isRepeating) {
for (int i = 0; i < batchSize; i++) {
if (!scratchlcv.isNull[i]) {
result.setRef(i, allBytes, offset, (int) scratchlcv.vector[i]);
offset += scratchlcv.vector[i];
} else {
result.setRef(i, allBytes, 0, 0);
}
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!scratchlcv.isNull[i]) {
result.setRef(i, allBytes, offset, (int) scratchlcv.vector[0]);
offset += scratchlcv.vector[0];
} else {
result.setRef(i, allBytes, 0, 0);
}
}
}
}
}
}
/**
* A reader for string columns that are direct encoded in the current
* stripe.
*/
public static class StringDirectTreeReader extends TreeReader {
private static final HadoopShims SHIMS = HadoopShimsFactory.get();
protected InStream stream;
protected IntegerReader lengths;
private final LongColumnVector scratchlcv;
StringDirectTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, null, context);
}
protected StringDirectTreeReader(int columnId, InStream present, InStream data,
InStream length, OrcProto.ColumnEncoding.Kind encoding,
Context context) throws IOException {
super(columnId, present, context);
this.scratchlcv = new LongColumnVector();
this.stream = data;
if (length != null && encoding != null) {
this.lengths = createIntegerReader(encoding, length, false, context);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if (encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT &&
encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DATA);
stream = planner.getStream(name);
lengths = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(new StreamName(columnId, OrcProto.Stream.Kind.LENGTH)),
false, context);
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
stream.seek(index);
      // also seek the length reader to the row group position
lengths.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final BytesColumnVector result = (BytesColumnVector) previousVector;
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
scratchlcv.ensureSize(batchSize, false);
BytesColumnVectorUtil.readOrcByteArrays(stream, lengths, scratchlcv,
result, batchSize);
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
long lengthToSkip = 0;
for (int i = 0; i < items; ++i) {
lengthToSkip += lengths.next();
}
while (lengthToSkip > 0) {
lengthToSkip -= stream.skip(lengthToSkip);
}
}
public IntegerReader getLengths() {
return lengths;
}
public InStream getStream() {
return stream;
}
}
/**
* A reader for string columns that are dictionary encoded in the current
* stripe.
*/
public static class StringDictionaryTreeReader extends TreeReader {
private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
private int[] dictionaryOffsets;
protected IntegerReader reader;
private InStream lengthStream;
private InStream dictionaryStream;
private OrcProto.ColumnEncoding lengthEncoding;
private byte[] dictionaryBuffer = null;
private final LongColumnVector scratchlcv;
private boolean initDictionary = false;
StringDictionaryTreeReader(int columnId, Context context) throws IOException {
this(columnId, null, null, null, null, null, context);
}
protected StringDictionaryTreeReader(int columnId, InStream present, InStream data,
InStream length, InStream dictionary, OrcProto.ColumnEncoding encoding,
Context context) throws IOException {
super(columnId, present, context);
scratchlcv = new LongColumnVector();
if (data != null && encoding != null) {
this.reader = createIntegerReader(encoding.getKind(), data, false, context);
}
lengthStream = length;
dictionaryStream = dictionary;
lengthEncoding = encoding;
initDictionary = false;
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if (encoding.getKind() != OrcProto.ColumnEncoding.Kind.DICTIONARY &&
encoding.getKind() != OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
StreamName name = new StreamName(columnId,
OrcProto.Stream.Kind.DICTIONARY_DATA);
dictionaryStream = planner.getStream(name);
initDictionary = false;
// read the lengths
name = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH);
InStream in = planner.getStream(name);
OrcProto.ColumnEncoding encoding = planner.getEncoding(columnId);
readDictionaryLengthStream(in, encoding);
// set up the row reader
name = new StreamName(columnId, OrcProto.Stream.Kind.DATA);
reader = createIntegerReader(encoding.getKind(),
planner.getStream(name), false, context);
}
private void readDictionaryLengthStream(InStream in, OrcProto.ColumnEncoding encoding)
throws IOException {
int dictionarySize = encoding.getDictionarySize();
if (in != null) { // Guard against empty LENGTH stream.
IntegerReader lenReader = createIntegerReader(encoding.getKind(), in, false, context);
int offset = 0;
if (dictionaryOffsets == null ||
dictionaryOffsets.length < dictionarySize + 1) {
dictionaryOffsets = new int[dictionarySize + 1];
}
for (int i = 0; i < dictionarySize; ++i) {
dictionaryOffsets[i] = offset;
offset += (int) lenReader.next();
}
dictionaryOffsets[dictionarySize] = offset;
in.close();
}
}
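    // Materialize the whole DICTIONARY_DATA stream into a single byte array whose
    // size is the last entry of dictionaryOffsets (the total dictionary length).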
private void readDictionaryStream(InStream in) throws IOException {
if (in != null) { // Guard against empty dictionary stream.
if (in.available() > 0) {
// remove reference to previous dictionary buffer
dictionaryBuffer = null;
int dictionaryBufferSize = dictionaryOffsets[dictionaryOffsets.length - 1];
dictionaryBuffer = new byte[dictionaryBufferSize];
int pos = 0;
// check if dictionary size is smaller than available stream size
// to avoid ArrayIndexOutOfBoundsException
int readSize = Math.min(in.available(), dictionaryBufferSize);
byte[] chunkBytes = new byte[readSize];
while (pos < dictionaryBufferSize) {
int currentLength = in.read(chunkBytes, 0, readSize);
// check if dictionary size is smaller than available stream size
// to avoid ArrayIndexOutOfBoundsException
currentLength = Math.min(currentLength, dictionaryBufferSize - pos);
System.arraycopy(chunkBytes, 0, dictionaryBuffer, pos, currentLength);
pos += currentLength;
}
}
in.close();
} else {
dictionaryBuffer = null;
}
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
seek(index[columnId], readPhase);
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
reader.seek(index);
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
final BytesColumnVector result = (BytesColumnVector) previousVector;
// remove reference to previous dictionary buffer
for (int i = 0; i < batchSize; i++) {
result.vector[i] = null;
}
      // Lazily read the dictionary buffer so that at most one dictionary is kept
      // in memory while reading across different stripes of the file.
if (!initDictionary) {
if (lengthStream != null && lengthEncoding != null) {
readDictionaryLengthStream(lengthStream, lengthEncoding);
}
if (dictionaryStream != null) {
readDictionaryStream(dictionaryStream);
}
initDictionary = true;
}
// Read present/isNull stream
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
readDictionaryByteArray(result, filterContext, batchSize);
}
private void readDictionaryByteArray(BytesColumnVector result,
FilterContext filterContext,
int batchSize) throws IOException {
int offset;
int length;
if (dictionaryBuffer != null) {
// Read string offsets
scratchlcv.isRepeating = result.isRepeating;
scratchlcv.noNulls = result.noNulls;
scratchlcv.isNull = result.isNull;
scratchlcv.ensureSize(batchSize, false);
reader.nextVector(scratchlcv, scratchlcv.vector, batchSize);
if (!scratchlcv.isRepeating) {
// The vector has non-repeating strings. Iterate thru the batch
// and set strings one by one
if (filterContext.isSelectedInUse()) {
// Set all string values to null - offset and length is zero
for (int i = 0; i < batchSize; i++) {
result.setRef(i, dictionaryBuffer, 0, 0);
}
// Read selected rows from stream
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (!scratchlcv.isNull[idx]) {
offset = dictionaryOffsets[(int) scratchlcv.vector[idx]];
length = getDictionaryEntryLength((int) scratchlcv.vector[idx], offset);
result.setRef(idx, dictionaryBuffer, offset, length);
}
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!scratchlcv.isNull[i]) {
offset = dictionaryOffsets[(int) scratchlcv.vector[i]];
length = getDictionaryEntryLength((int) scratchlcv.vector[i], offset);
result.setRef(i, dictionaryBuffer, offset, length);
} else {
// If the value is null then set offset and length to zero (null string)
result.setRef(i, dictionaryBuffer, 0, 0);
}
}
}
} else {
// If the value is repeating then just set the first value in the
// vector and set the isRepeating flag to true. No need to iterate thru and
// set all the elements to the same value
offset = dictionaryOffsets[(int) scratchlcv.vector[0]];
length = getDictionaryEntryLength((int) scratchlcv.vector[0], offset);
result.setRef(0, dictionaryBuffer, offset, length);
}
result.isRepeating = scratchlcv.isRepeating;
} else {
if (dictionaryOffsets == null) {
// Entire stripe contains null strings.
result.isRepeating = true;
result.noNulls = false;
result.isNull[0] = true;
result.setRef(0, EMPTY_BYTE_ARRAY, 0, 0);
} else {
// stripe contains nulls and empty strings
for (int i = 0; i < batchSize; i++) {
if (!result.isNull[i]) {
result.setRef(i, EMPTY_BYTE_ARRAY, 0, 0);
}
}
}
}
}
int getDictionaryEntryLength(int entry, int offset) {
final int length;
// if it isn't the last entry, subtract the offsets otherwise use
// the buffer length.
if (entry < dictionaryOffsets.length - 1) {
length = dictionaryOffsets[entry + 1] - offset;
} else {
length = dictionaryBuffer.length - offset;
}
return length;
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
reader.skip(countNonNulls(items));
}
public IntegerReader getReader() {
return reader;
}
}
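  /**
   * A tree reader for CHAR columns: values are read exactly like strings and then
   * right trimmed and truncated to the declared maximum length.
   */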
public static class CharTreeReader extends StringTreeReader {
int maxLength;
CharTreeReader(int columnId, int maxLength, Context context) throws IOException {
this(columnId, maxLength, null, null, null, null, null, context);
}
protected CharTreeReader(int columnId, int maxLength, InStream present, InStream data,
InStream length, InStream dictionary, OrcProto.ColumnEncoding encoding,
Context context) throws IOException {
super(columnId, present, data, length, dictionary, encoding, context);
this.maxLength = maxLength;
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
// Get the vector of strings from StringTreeReader, then make a 2nd pass to
// adjust down the length (right trim and truncate) if necessary.
super.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
BytesColumnVector result = (BytesColumnVector) previousVector;
int adjustedDownLen;
if (result.isRepeating) {
if (result.noNulls || !result.isNull[0]) {
adjustedDownLen = StringExpr
.rightTrimAndTruncate(result.vector[0], result.start[0], result.length[0], maxLength);
if (adjustedDownLen < result.length[0]) {
result.setRef(0, result.vector[0], result.start[0], adjustedDownLen);
}
}
} else {
if (result.noNulls) {
for (int i = 0; i < batchSize; i++) {
adjustedDownLen = StringExpr
.rightTrimAndTruncate(result.vector[i], result.start[i], result.length[i],
maxLength);
if (adjustedDownLen < result.length[i]) {
result.setRef(i, result.vector[i], result.start[i], adjustedDownLen);
}
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!result.isNull[i]) {
adjustedDownLen = StringExpr
.rightTrimAndTruncate(result.vector[i], result.start[i], result.length[i],
maxLength);
if (adjustedDownLen < result.length[i]) {
result.setRef(i, result.vector[i], result.start[i], adjustedDownLen);
}
}
}
}
}
}
}
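  /**
   * A tree reader for VARCHAR columns: values are read exactly like strings and
   * then truncated (without trimming) to the declared maximum length.
   */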
public static class VarcharTreeReader extends StringTreeReader {
int maxLength;
VarcharTreeReader(int columnId, int maxLength, Context context) throws IOException {
this(columnId, maxLength, null, null, null, null, null, context);
}
protected VarcharTreeReader(int columnId, int maxLength, InStream present, InStream data,
InStream length, InStream dictionary,
OrcProto.ColumnEncoding encoding,
Context context) throws IOException {
super(columnId, present, data, length, dictionary, encoding, context);
this.maxLength = maxLength;
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
// Get the vector of strings from StringTreeReader, then make a 2nd pass to
// adjust down the length (truncate) if necessary.
super.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
BytesColumnVector result = (BytesColumnVector) previousVector;
int adjustedDownLen;
if (result.isRepeating) {
if (result.noNulls || !result.isNull[0]) {
adjustedDownLen = StringExpr
.truncate(result.vector[0], result.start[0], result.length[0], maxLength);
if (adjustedDownLen < result.length[0]) {
result.setRef(0, result.vector[0], result.start[0], adjustedDownLen);
}
}
} else {
if (result.noNulls) {
for (int i = 0; i < batchSize; i++) {
adjustedDownLen = StringExpr
.truncate(result.vector[i], result.start[i], result.length[i], maxLength);
if (adjustedDownLen < result.length[i]) {
result.setRef(i, result.vector[i], result.start[i], adjustedDownLen);
}
}
} else {
for (int i = 0; i < batchSize; i++) {
if (!result.isNull[i]) {
adjustedDownLen = StringExpr
.truncate(result.vector[i], result.start[i], result.length[i], maxLength);
if (adjustedDownLen < result.length[i]) {
result.setRef(i, result.vector[i], result.start[i], adjustedDownLen);
}
}
}
}
}
}
}
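  /**
   * A tree reader for STRUCT columns. It owns one child reader per field and
   * passes the struct's null mask down so children only fill rows where the
   * struct itself is non-null.
   */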
public static class StructTreeReader extends TreeReader {
public final TypeReader[] fields;
protected StructTreeReader(int columnId,
TypeDescription readerSchema,
Context context) throws IOException {
super(columnId, null, context);
List<TypeDescription> childrenTypes = readerSchema.getChildren();
this.fields = new TypeReader[childrenTypes.size()];
for (int i = 0; i < fields.length; ++i) {
TypeDescription subtype = childrenTypes.get(i);
this.fields[i] = createTreeReader(subtype, context);
}
}
public TypeReader[] getChildReaders() {
return fields;
}
protected StructTreeReader(int columnId, InStream present,
Context context,
OrcProto.ColumnEncoding encoding,
TypeReader[] childReaders) throws IOException {
super(columnId, present, context);
if (encoding != null) {
checkEncoding(encoding);
}
this.fields = childReaders;
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
if (readPhase.contains(this.readerCategory)) {
super.seek(index, readPhase);
}
for (TypeReader kid : fields) {
if (kid != null && TypeReader.shouldProcessChild(kid, readPhase)) {
kid.seek(index, readPhase);
}
}
}
@Override
public void seek(PositionProvider index, ReadPhase readPhase) throws IOException {
if (readPhase.contains(this.readerCategory)) {
super.seek(index, readPhase);
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
StructColumnVector result = (StructColumnVector) previousVector;
      if (readPhase.contains(this.readerCategory)) {
super.nextVector(previousVector, isNull, batchSize, filterContext, readPhase);
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
result.isRepeating = false;
}
}
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
// Read all the members of struct as column vectors
boolean[] mask = result.noNulls ? null : result.isNull;
for (int f = 0; f < fields.length; f++) {
if (fields[f] != null && TypeReader.shouldProcessChild(fields[f], readPhase)) {
fields[f].nextVector(result.fields[f], mask, batchSize, filterContext, readPhase);
}
}
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
if (readPhase.contains(this.readerCategory)) {
super.startStripe(planner, readPhase);
}
for (TypeReader field : fields) {
if (field != null && TypeReader.shouldProcessChild(field, readPhase)) {
field.startStripe(planner, readPhase);
}
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
if (!readPhase.contains(this.readerCategory)) {
return;
}
items = countNonNulls(items);
for (TypeReader field : fields) {
if (field != null && TypeReader.shouldProcessChild(field, readPhase)) {
field.skipRows(items, readPhase);
}
}
}
}
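  /**
   * A tree reader for UNION columns. A byte tag per row, read from the DATA
   * stream, selects which child holds the value; each child reader is then
   * invoked with an "ignore" mask covering the rows that belong to other tags.
   */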
public static class UnionTreeReader extends TreeReader {
protected final TypeReader[] fields;
protected RunLengthByteReader tags;
protected UnionTreeReader(int fileColumn,
TypeDescription readerSchema,
Context context) throws IOException {
super(fileColumn, null, context);
List<TypeDescription> childrenTypes = readerSchema.getChildren();
int fieldCount = childrenTypes.size();
this.fields = new TypeReader[fieldCount];
for (int i = 0; i < fieldCount; ++i) {
TypeDescription subtype = childrenTypes.get(i);
this.fields[i] = createTreeReader(subtype, context);
}
}
protected UnionTreeReader(int columnId, InStream present,
Context context,
OrcProto.ColumnEncoding encoding,
TypeReader[] childReaders) throws IOException {
super(columnId, present, context);
if (encoding != null) {
checkEncoding(encoding);
}
this.fields = childReaders;
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
if (readPhase.contains(this.readerCategory)) {
super.seek(index, readPhase);
tags.seek(index[columnId]);
}
for (TypeReader kid : fields) {
if (TypeReader.shouldProcessChild(kid, readPhase)) {
kid.seek(index, readPhase);
}
}
}
@Override
public void nextVector(ColumnVector previousVector,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
UnionColumnVector result = (UnionColumnVector) previousVector;
if (readPhase.contains(this.readerCategory)) {
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
result.isRepeating = false;
tags.nextVector(result.noNulls ? null : result.isNull, result.tags,
batchSize);
}
}
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
boolean[] ignore = new boolean[(int) batchSize];
for (int f = 0; f < result.fields.length; ++f) {
if (TypeReader.shouldProcessChild(fields[f], readPhase)) {
// build the ignore list for this tag
for (int r = 0; r < batchSize; ++r) {
ignore[r] = (!result.noNulls && result.isNull[r]) ||
result.tags[r] != f;
}
fields[f].nextVector(result.fields[f], ignore, batchSize, filterContext, readPhase);
}
}
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
if (readPhase.contains(this.readerCategory)) {
super.startStripe(planner, readPhase);
tags = new RunLengthByteReader(planner.getStream(
new StreamName(columnId, OrcProto.Stream.Kind.DATA)));
}
for (TypeReader field : fields) {
if (field != null && TypeReader.shouldProcessChild(field, readPhase)) {
field.startStripe(planner, readPhase);
}
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
if (!readPhase.contains(this.readerCategory)) {
return;
}
items = countNonNulls(items);
long[] counts = new long[fields.length];
for (int i = 0; i < items; ++i) {
counts[tags.next()] += 1;
}
for (int i = 0; i < counts.length; ++i) {
if (TypeReader.shouldProcessChild(fields[i], readPhase)) {
fields[i].skipRows(counts[i], readPhase);
}
}
}
}
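  // A no-op filter handed to the child readers of compound types, where the
  // parent's row-level selection cannot be applied directly to the flattened
  // child rows.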
private static final FilterContext NULL_FILTER = new FilterContext() {
@Override
public void reset() {
}
@Override
public boolean isSelectedInUse() {
return false;
}
@Override
public int[] getSelected() {
return new int[0];
}
@Override
public int getSelectedSize() {
return 0;
}
};
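  /**
   * A tree reader for LIST columns. Per-row element counts come from the LENGTH
   * stream; the offsets and total child count are derived from them and the
   * element reader then reads all of the children in a single call.
   */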
public static class ListTreeReader extends TreeReader {
protected final TypeReader elementReader;
protected IntegerReader lengths = null;
protected ListTreeReader(int fileColumn,
TypeDescription readerSchema,
Context context) throws IOException {
super(fileColumn, context);
TypeDescription elementType = readerSchema.getChildren().get(0);
elementReader = createTreeReader(elementType, context);
}
protected ListTreeReader(int columnId,
InStream present,
Context context,
InStream data,
OrcProto.ColumnEncoding encoding,
TypeReader elementReader) throws IOException {
super(columnId, present, context);
if (data != null && encoding != null) {
checkEncoding(encoding);
this.lengths = createIntegerReader(encoding.getKind(), data, false, context);
}
this.elementReader = elementReader;
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
lengths.seek(index[columnId]);
elementReader.seek(index, readPhase);
}
@Override
public void nextVector(ColumnVector previous,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
ListColumnVector result = (ListColumnVector) previous;
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
      // if we have some non-null values, then read them
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
lengths.nextVector(result, result.lengths, batchSize);
// even with repeating lengths, the list doesn't repeat
result.isRepeating = false;
// build the offsets vector and figure out how many children to read
result.childCount = 0;
for (int r = 0; r < batchSize; ++r) {
if (result.noNulls || !result.isNull[r]) {
result.offsets[r] = result.childCount;
result.childCount += result.lengths[r];
}
}
result.child.ensureSize(result.childCount, false);
// We always read all of the children, because the parent filter wouldn't apply right.
elementReader.nextVector(result.child, null, result.childCount, NULL_FILTER, readPhase);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
lengths = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.LENGTH)), false, context);
if (elementReader != null) {
elementReader.startStripe(planner, readPhase);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
long childSkip = 0;
for (long i = 0; i < items; ++i) {
childSkip += lengths.next();
}
elementReader.skipRows(childSkip, readPhase);
}
}
public static class MapTreeReader extends TreeReader {
protected final TypeReader keyReader;
protected final TypeReader valueReader;
protected IntegerReader lengths = null;
protected MapTreeReader(int fileColumn,
TypeDescription readerSchema,
Context context) throws IOException {
super(fileColumn, context);
TypeDescription keyType = readerSchema.getChildren().get(0);
TypeDescription valueType = readerSchema.getChildren().get(1);
keyReader = createTreeReader(keyType, context);
valueReader = createTreeReader(valueType, context);
}
protected MapTreeReader(int columnId,
InStream present,
Context context,
InStream data,
OrcProto.ColumnEncoding encoding,
TypeReader keyReader,
TypeReader valueReader) throws IOException {
super(columnId, present, context);
if (data != null && encoding != null) {
checkEncoding(encoding);
this.lengths = createIntegerReader(encoding.getKind(), data, false, context);
}
this.keyReader = keyReader;
this.valueReader = valueReader;
}
@Override
public void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException {
super.seek(index, readPhase);
lengths.seek(index[columnId]);
keyReader.seek(index, readPhase);
valueReader.seek(index, readPhase);
}
@Override
public void nextVector(ColumnVector previous,
boolean[] isNull,
final int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException {
MapColumnVector result = (MapColumnVector) previous;
super.nextVector(result, isNull, batchSize, filterContext, readPhase);
if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
lengths.nextVector(result, result.lengths, batchSize);
// even with repeating lengths, the map doesn't repeat
result.isRepeating = false;
// build the offsets vector and figure out how many children to read
result.childCount = 0;
for (int r = 0; r < batchSize; ++r) {
if (result.noNulls || !result.isNull[r]) {
result.offsets[r] = result.childCount;
result.childCount += result.lengths[r];
}
}
result.keys.ensureSize(result.childCount, false);
result.values.ensureSize(result.childCount, false);
keyReader.nextVector(result.keys, null, result.childCount, NULL_FILTER, readPhase);
valueReader.nextVector(result.values, null, result.childCount, NULL_FILTER, readPhase);
}
}
@Override
public void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException {
if ((encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT) &&
(encoding.getKind() != OrcProto.ColumnEncoding.Kind.DIRECT_V2)) {
throw new IOException("Unknown encoding " + encoding + " in column " +
columnId);
}
}
@Override
public void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException {
super.startStripe(planner, readPhase);
lengths = createIntegerReader(planner.getEncoding(columnId).getKind(),
planner.getStream(new StreamName(columnId,
OrcProto.Stream.Kind.LENGTH)), false, context);
if (keyReader != null) {
keyReader.startStripe(planner, readPhase);
}
if (valueReader != null) {
valueReader.startStripe(planner, readPhase);
}
}
@Override
public void skipRows(long items, ReadPhase readPhase) throws IOException {
items = countNonNulls(items);
long childSkip = 0;
for (long i = 0; i < items; ++i) {
childSkip += lengths.next();
}
keyReader.skipRows(childSkip, readPhase);
valueReader.skipRows(childSkip, readPhase);
}
}
public static TypeReader createTreeReader(TypeDescription readerType,
Context context) throws IOException {
OrcFile.Version version = context.getFileFormat();
final SchemaEvolution evolution = context.getSchemaEvolution();
TypeDescription fileType = evolution.getFileType(readerType);
if (fileType == null || !evolution.includeReaderColumn(readerType.getId())){
return new NullTreeReader(-1, context);
}
TypeDescription.Category readerTypeCategory = readerType.getCategory();
// We skip attribute checks when comparing types since they are not used to
// create the ConvertTreeReaders
if (!fileType.equals(readerType, false) &&
(readerTypeCategory != TypeDescription.Category.STRUCT &&
readerTypeCategory != TypeDescription.Category.MAP &&
readerTypeCategory != TypeDescription.Category.LIST &&
readerTypeCategory != TypeDescription.Category.UNION)) {
      // For complex types (struct/map/list/union) only their children are
      // converted, so only mismatched non-complex types take this path.
return ConvertTreeReaderFactory.createConvertTreeReader(readerType, context);
}
switch (readerTypeCategory) {
case BOOLEAN:
return new BooleanTreeReader(fileType.getId(), context);
case BYTE:
return new ByteTreeReader(fileType.getId(), context);
case DOUBLE:
return new DoubleTreeReader(fileType.getId(), context);
case FLOAT:
return new FloatTreeReader(fileType.getId(), context);
case SHORT:
return new ShortTreeReader(fileType.getId(), context);
case INT:
return new IntTreeReader(fileType.getId(), context);
case LONG:
return new LongTreeReader(fileType.getId(), context);
case STRING:
return new StringTreeReader(fileType.getId(), context);
case CHAR:
return new CharTreeReader(fileType.getId(), readerType.getMaxLength(), context);
case VARCHAR:
return new VarcharTreeReader(fileType.getId(), readerType.getMaxLength(), context);
case BINARY:
return new BinaryTreeReader(fileType.getId(), context);
case TIMESTAMP:
return new TimestampTreeReader(fileType.getId(), context, false);
case TIMESTAMP_INSTANT:
return new TimestampTreeReader(fileType.getId(), context, true);
case DATE:
return new DateTreeReader(fileType.getId(), context);
case DECIMAL:
if (isDecimalAsLong(version, fileType.getPrecision())){
return new Decimal64TreeReader(fileType.getId(), fileType.getPrecision(),
fileType.getScale(), context);
}
return new DecimalTreeReader(fileType.getId(), fileType.getPrecision(),
fileType.getScale(), context);
case STRUCT:
return new StructTreeReader(fileType.getId(), readerType, context);
case LIST:
return new ListTreeReader(fileType.getId(), readerType, context);
case MAP:
return new MapTreeReader(fileType.getId(), readerType, context);
case UNION:
return new UnionTreeReader(fileType.getId(), readerType, context);
default:
throw new IllegalArgumentException("Unsupported type " +
readerTypeCategory);
}
}
public static boolean isDecimalAsLong(OrcFile.Version version, int precision) {
return version == OrcFile.Version.UNSTABLE_PRE_2_0 &&
precision <= TypeDescription.MAX_DECIMAL64_PRECISION;
}
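  // Editorial note (not in the original source): DECIMAL64 readers are only
  // selected for the unreleased UNSTABLE_PRE_2_0 format, where decimals whose
  // precision fits in a long (at most 18 digits) are encoded directly as longs;
  // released file versions always use DecimalTreeReader.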
public static BatchReader createRootReader(TypeDescription readerType, Context context)
throws IOException {
TypeReader reader = createTreeReader(readerType, context);
if (reader instanceof StructTreeReader) {
return new StructBatchReader(reader, context);
} else {
return new PrimitiveBatchReader(reader);
}
}
}
| 113,631 | 36.03781 | 106 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/TypeUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.orc.TypeDescription;
import java.util.List;
public class TypeUtils {
private TypeUtils() {}
public static ColumnVector createColumn(TypeDescription schema,
TypeDescription.RowBatchVersion version,
int maxSize) {
switch (schema.getCategory()) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
return new LongColumnVector(maxSize);
case DATE:
return new DateColumnVector(maxSize);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampColumnVector(maxSize);
case FLOAT:
case DOUBLE:
return new DoubleColumnVector(maxSize);
case DECIMAL: {
int precision = schema.getPrecision();
int scale = schema.getScale();
if (version == TypeDescription.RowBatchVersion.ORIGINAL ||
precision > TypeDescription.MAX_DECIMAL64_PRECISION) {
return new DecimalColumnVector(maxSize, precision, scale);
} else {
return new Decimal64ColumnVector(maxSize, precision, scale);
}
}
case STRING:
case BINARY:
case CHAR:
case VARCHAR:
return new BytesColumnVector(maxSize);
case STRUCT: {
List<TypeDescription> children = schema.getChildren();
ColumnVector[] fieldVector = new ColumnVector[children.size()];
for(int i=0; i < fieldVector.length; ++i) {
fieldVector[i] = createColumn(children.get(i), version, maxSize);
}
return new StructColumnVector(maxSize,
fieldVector);
}
case UNION: {
List<TypeDescription> children = schema.getChildren();
ColumnVector[] fieldVector = new ColumnVector[children.size()];
for(int i=0; i < fieldVector.length; ++i) {
fieldVector[i] = createColumn(children.get(i), version, maxSize);
}
return new UnionColumnVector(maxSize,
fieldVector);
}
case LIST: {
List<TypeDescription> children = schema.getChildren();
return new ListColumnVector(maxSize,
createColumn(children.get(0), version, maxSize));
}
case MAP: {
List<TypeDescription> children = schema.getChildren();
return new MapColumnVector(maxSize,
createColumn(children.get(0), version, maxSize),
createColumn(children.get(1), version, maxSize));
}
default:
throw new IllegalArgumentException("Unknown type " + schema.getCategory());
}
}
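  // Illustrative sketch added for this edit (helper name is hypothetical and not
  // part of the original file): building the vector for a decimal(10, 2) column.
  // With the ORIGINAL batch version this yields a DecimalColumnVector; the
  // decimal-64 batch version together with precision <= 18 would yield a
  // Decimal64ColumnVector instead.
  private static ColumnVector exampleDecimalColumn(int maxSize) {
    TypeDescription decimalType =
        TypeDescription.createDecimal().withPrecision(10).withScale(2);
    return createColumn(decimalType, TypeDescription.RowBatchVersion.ORIGINAL, maxSize);
  }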
}
| 4,205 | 37.944444 | 83 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/Utf8Utils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.nio.charset.StandardCharsets;
public final class Utf8Utils {
public static int charLength(byte[] data, int offset, int length) {
int chars = 0;
for (int i = 0; i < length; i++) {
      if (isUtfStartByte(data[offset + i])) {
chars++;
}
}
return chars;
}
  /**
   * Return the number of bytes required to read at most
   * maxCharLength characters in full from a utf-8 encoded byte array provided
   * by data[offset:offset+length]. This does not validate utf-8 data, but
   * operates correctly on already valid utf-8 data.
   *
   * @param maxCharLength the maximum number of characters to keep
   * @param data the utf-8 encoded bytes
   * @param offset the position of the first byte to consider
   * @param length the number of bytes to consider
   * @return the number of bytes that hold at most maxCharLength full characters
   */
public static int truncateBytesTo(int maxCharLength, byte[] data, int offset, int length) {
int chars = 0;
if (length <= maxCharLength) {
return length;
}
for (int i = 0; i < length; i++) {
      if (isUtfStartByte(data[offset + i])) {
chars++;
}
if (chars > maxCharLength) {
return i;
}
}
// everything fits
return length;
}
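  // Illustrative sketch added for this edit (helper name is hypothetical and not
  // part of the original file): "héllo" is 5 characters but 6 bytes in UTF-8, so
  // keeping at most 3 characters keeps the first 4 bytes and yields "hél".
  private static String truncateExample() {
    byte[] utf8 = "héllo".getBytes(StandardCharsets.UTF_8);
    int bytes = truncateBytesTo(3, utf8, 0, utf8.length); // returns 4
    return new String(utf8, 0, bytes, StandardCharsets.UTF_8);
  }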
/**
* Checks if b is the first byte of a UTF-8 character.
*
*/
public static boolean isUtfStartByte(byte b) {
return (b & 0xC0) != 0x80;
}
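  // Editorial note (not in the original source): UTF-8 continuation bytes always
  // have the bit pattern 10xxxxxx, so any byte whose top two bits are not "10"
  // begins a new character, either a single-byte ASCII character or the lead
  // byte of a multi-byte sequence.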
/**
* Find the start of the last character that ends in the current string.
* @param text the bytes of the utf-8
* @param from the first byte location
* @param until the last byte location
* @return the index of the last character
*/
public static int findLastCharacter(byte[] text, int from, int until) {
int posn = until;
    /* we don't expect characters to span more than 5 bytes */
while (posn >= from) {
if (isUtfStartByte(text[posn])) {
return posn;
}
posn -= 1;
}
/* beginning of a valid char not found */
throw new IllegalArgumentException(
"Could not truncate string, beginning of a valid char not found");
}
/**
* Get the code point at a given location in the byte array.
* @param source the bytes of the string
* @param from the offset to start at
* @param len the number of bytes in the character
* @return the code point
*/
public static int getCodePoint(byte[] source, int from, int len) {
return new String(source, from, len, StandardCharsets.UTF_8)
.codePointAt(0);
}
}
| 3,151 | 29.019048 | 93 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/VisitorContextImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import java.io.OutputStream;
/**
 * Base implementation for {@link org.apache.orc.impl.Dictionary.VisitorContext} used for traversing
 * all nodes in a dictionary.
*/
public class VisitorContextImpl implements Dictionary.VisitorContext {
private int originalPosition;
private int start;
private int end;
private final DynamicIntArray keyOffsets;
private final DynamicByteArray byteArray;
private final Text text = new Text();
public VisitorContextImpl(DynamicByteArray byteArray, DynamicIntArray keyOffsets) {
this.byteArray = byteArray;
this.keyOffsets = keyOffsets;
}
@Override
public int getOriginalPosition() {
return originalPosition;
}
@Override
public Text getText() {
byteArray.setText(text, start, end - start);
return text;
}
@Override
public void writeBytes(OutputStream out)
throws IOException {
byteArray.write(out, start, end - start);
}
@Override
public int getLength() {
return end - start;
}
public void setPosition(int position) {
originalPosition = position;
start = keyOffsets.get(originalPosition);
if (position + 1 == keyOffsets.size()) {
end = byteArray.size();
} else {
end = keyOffsets.get(originalPosition + 1);
}
}
}
| 2,162 | 27.460526 | 99 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/WriterImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import com.google.protobuf.ByteString;
import io.airlift.compress.lz4.Lz4Compressor;
import io.airlift.compress.lz4.Lz4Decompressor;
import io.airlift.compress.lzo.LzoCompressor;
import io.airlift.compress.lzo.LzoDecompressor;
import io.airlift.compress.zstd.ZstdCompressor;
import io.airlift.compress.zstd.ZstdDecompressor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import org.apache.orc.DataMask;
import org.apache.orc.MemoryManager;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.StripeInformation;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.writer.StreamOptions;
import org.apache.orc.impl.writer.TreeWriter;
import org.apache.orc.impl.writer.WriterContext;
import org.apache.orc.impl.writer.WriterEncryptionKey;
import org.apache.orc.impl.writer.WriterEncryptionVariant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TimeZone;
import java.util.TreeMap;
/**
* An ORC file writer. The file is divided into stripes, which is the natural
* unit of work when reading. Each stripe is buffered in memory until the
* memory reaches the stripe size and then it is written out broken down by
* columns. Each column is written by a TreeWriter that is specific to that
* type of column. TreeWriters may have children TreeWriters that handle the
* sub-types. Each of the TreeWriters writes the column's data as a set of
* streams.
* <p>
 * This class is unsynchronized like most Stream objects, so the creation of an
 * OrcFile and all access to a single instance must be from a single
 * thread.
* <p>
* There are no known cases where these happen between different threads today.
* <p>
 * Caveat: the MemoryManager is created during WriterOptions creation and has
 * to be confined to a single thread as well.
*
*/
public class WriterImpl implements WriterInternal, MemoryManager.Callback {
private static final Logger LOG = LoggerFactory.getLogger(WriterImpl.class);
private static final int MIN_ROW_INDEX_STRIDE = 1000;
private final Path path;
private final long stripeSize;
private final long stripeRowCount;
private final int rowIndexStride;
private final TypeDescription schema;
private final PhysicalWriter physicalWriter;
private final OrcFile.WriterVersion writerVersion;
private final StreamOptions unencryptedOptions;
private long rowCount = 0;
private long rowsInStripe = 0;
private long rawDataSize = 0;
private int rowsInIndex = 0;
private long lastFlushOffset = 0;
private int stripesAtLastFlush = -1;
private final List<OrcProto.StripeInformation> stripes =
new ArrayList<>();
private final Map<String, ByteString> userMetadata =
new TreeMap<>();
private final TreeWriter treeWriter;
private final boolean buildIndex;
private final MemoryManager memoryManager;
private long previousAllocation = -1;
private long memoryLimit;
private final long ROWS_PER_CHECK;
private long rowsSinceCheck = 0;
private final OrcFile.Version version;
private final Configuration conf;
private final OrcFile.WriterCallback callback;
private final OrcFile.WriterContext callbackContext;
private final OrcFile.EncodingStrategy encodingStrategy;
private final OrcFile.CompressionStrategy compressionStrategy;
private final boolean[] bloomFilterColumns;
private final double bloomFilterFpp;
private final OrcFile.BloomFilterVersion bloomFilterVersion;
private final boolean writeTimeZone;
private final boolean useUTCTimeZone;
private final double dictionaryKeySizeThreshold;
private final boolean[] directEncodingColumns;
private final List<OrcProto.ColumnEncoding> unencryptedEncodings =
new ArrayList<>();
// the list of maskDescriptions, keys, and variants
private SortedMap<String, MaskDescriptionImpl> maskDescriptions = new TreeMap<>();
private SortedMap<String, WriterEncryptionKey> keys = new TreeMap<>();
private final WriterEncryptionVariant[] encryption;
// the mapping of columns to maskDescriptions
private final MaskDescriptionImpl[] columnMaskDescriptions;
// the mapping of columns to EncryptionVariants
private final WriterEncryptionVariant[] columnEncryption;
private KeyProvider keyProvider;
// do we need to include the current encryption keys in the next stripe
// information
private boolean needKeyFlush;
private final boolean useProlepticGregorian;
private boolean isClose = false;
public WriterImpl(FileSystem fs,
Path path,
OrcFile.WriterOptions opts) throws IOException {
this.path = path;
this.conf = opts.getConfiguration();
// clone it so that we can annotate it with encryption
this.schema = opts.getSchema().clone();
int numColumns = schema.getMaximumId() + 1;
if (!opts.isEnforceBufferSize()) {
opts.bufferSize(getEstimatedBufferSize(opts.getStripeSize(), numColumns,
opts.getBufferSize()));
}
// Annotate the schema with the column encryption
schema.annotateEncryption(opts.getEncryption(), opts.getMasks());
columnEncryption = new WriterEncryptionVariant[numColumns];
columnMaskDescriptions = new MaskDescriptionImpl[numColumns];
encryption = setupEncryption(opts.getKeyProvider(), schema,
opts.getKeyOverrides());
needKeyFlush = encryption.length > 0;
this.directEncodingColumns = OrcUtils.includeColumns(
opts.getDirectEncodingColumns(), opts.getSchema());
dictionaryKeySizeThreshold =
OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getDouble(conf);
this.callback = opts.getCallback();
if (callback != null) {
callbackContext = () -> WriterImpl.this;
} else {
callbackContext = null;
}
this.useProlepticGregorian = opts.getProlepticGregorian();
this.writeTimeZone = hasTimestamp(schema);
this.useUTCTimeZone = opts.getUseUTCTimestamp();
this.encodingStrategy = opts.getEncodingStrategy();
this.compressionStrategy = opts.getCompressionStrategy();
// ORC-1362: if isBuildIndex=false, then rowIndexStride will be set to 0.
if (opts.getRowIndexStride() >= 0 && opts.isBuildIndex()) {
this.rowIndexStride = opts.getRowIndexStride();
} else {
this.rowIndexStride = 0;
}
this.buildIndex = rowIndexStride > 0;
if (buildIndex && rowIndexStride < MIN_ROW_INDEX_STRIDE) {
throw new IllegalArgumentException("Row stride must be at least " +
MIN_ROW_INDEX_STRIDE);
}
this.writerVersion = opts.getWriterVersion();
this.version = opts.getVersion();
if (version == OrcFile.Version.FUTURE) {
throw new IllegalArgumentException("Can not write in a unknown version.");
} else if (version == OrcFile.Version.UNSTABLE_PRE_2_0) {
LOG.warn("ORC files written in " + version.getName() + " will not be" +
" readable by other versions of the software. It is only for" +
" developer testing.");
}
this.bloomFilterVersion = opts.getBloomFilterVersion();
this.bloomFilterFpp = opts.getBloomFilterFpp();
/* do not write bloom filters for ORC v11 */
if (!buildIndex || version == OrcFile.Version.V_0_11) {
this.bloomFilterColumns = new boolean[schema.getMaximumId() + 1];
} else {
this.bloomFilterColumns =
OrcUtils.includeColumns(opts.getBloomFilterColumns(), schema);
}
// ensure that we are able to handle callbacks before we register ourselves
ROWS_PER_CHECK = Math.min(opts.getStripeRowCountValue(),
OrcConf.ROWS_BETWEEN_CHECKS.getLong(conf));
    this.stripeRowCount = opts.getStripeRowCountValue();
this.stripeSize = opts.getStripeSize();
memoryLimit = stripeSize;
memoryManager = opts.getMemoryManager();
memoryManager.addWriter(path, stripeSize, this);
// Set up the physical writer
this.physicalWriter = opts.getPhysicalWriter() == null ?
new PhysicalFsWriter(fs, path, opts, encryption) :
opts.getPhysicalWriter();
physicalWriter.writeHeader();
unencryptedOptions = physicalWriter.getStreamOptions();
OutStream.assertBufferSizeValid(unencryptedOptions.getBufferSize());
treeWriter = TreeWriter.Factory.create(schema, null, new StreamFactory());
LOG.info("ORC writer created for path: {} with stripeSize: {} options: {}",
path, stripeSize, unencryptedOptions);
}
//@VisibleForTesting
public static int getEstimatedBufferSize(long stripeSize, int numColumns,
int bs) {
    // The worst case is that there are 2 big streams per column and
// we want to guarantee that each stream gets ~10 buffers.
// This keeps buffers small enough that we don't get really small stripe
// sizes.
int estBufferSize = (int) (stripeSize / (20L * numColumns));
estBufferSize = getClosestBufferSize(estBufferSize);
return Math.min(estBufferSize, bs);
}
@Override
public void increaseCompressionSize(int newSize) {
if (newSize > unencryptedOptions.getBufferSize()) {
unencryptedOptions.bufferSize(newSize);
}
}
/**
   * Given a buffer size, return the nearest power of 2 that is greater than or
   * equal to it. The minimum value is 4KiB and the maximum value is 256KiB.
*
* @param size Proposed buffer size
* @return the suggested buffer size
*/
private static int getClosestBufferSize(int size) {
final int kb4 = 4 * 1024;
final int kb256 = 256 * 1024;
final int pow2 = size == 1 ? 1 : Integer.highestOneBit(size - 1) * 2;
return Math.min(kb256, Math.max(kb4, pow2));
}
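  // Worked example (editorial note, not in the original source): a 64MiB stripe
  // with 50 columns gives 64MiB / (20 * 50), roughly 67KB per buffer, which
  // getClosestBufferSize rounds up to the 128KiB power of two; the result is
  // then capped by the buffer size passed in by the caller.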
public static CompressionCodec createCodec(CompressionKind kind) {
switch (kind) {
case NONE:
return null;
case ZLIB:
return new ZlibCodec();
case SNAPPY:
return new SnappyCodec();
case LZO:
return new AircompressorCodec(kind, new LzoCompressor(),
new LzoDecompressor());
case LZ4:
return new AircompressorCodec(kind, new Lz4Compressor(),
new Lz4Decompressor());
case ZSTD:
return new AircompressorCodec(kind, new ZstdCompressor(),
new ZstdDecompressor());
default:
throw new IllegalArgumentException("Unknown compression codec: " +
kind);
}
}
@Override
public boolean checkMemory(double newScale) throws IOException {
memoryLimit = Math.round(stripeSize * newScale);
return checkMemory();
}
private boolean checkMemory() throws IOException {
if (rowsSinceCheck >= ROWS_PER_CHECK) {
rowsSinceCheck = 0;
long size = treeWriter.estimateMemory();
if (LOG.isDebugEnabled()) {
LOG.debug("ORC writer " + physicalWriter + " size = " + size +
" memoryLimit = " + memoryLimit + " rowsInStripe = " + rowsInStripe +
" stripeRowCountLimit = " + stripeRowCount);
}
if (size > memoryLimit || rowsInStripe >= stripeRowCount) {
flushStripe();
return true;
}
}
return false;
}
/**
* Interface from the Writer to the TreeWriters. This limits the visibility
* that the TreeWriters have into the Writer.
*/
private class StreamFactory implements WriterContext {
/**
* Create a stream to store part of a column.
* @param name the name for the stream
     * @return the output stream that the section needs to be written to.
*/
@Override
public OutStream createStream(StreamName name) throws IOException {
StreamOptions options = SerializationUtils.getCustomizedCodec(
unencryptedOptions, compressionStrategy, name.getKind());
WriterEncryptionVariant encryption =
(WriterEncryptionVariant) name.getEncryption();
if (encryption != null) {
if (options == unencryptedOptions) {
options = new StreamOptions(options);
}
options.withEncryption(encryption.getKeyDescription().getAlgorithm(),
encryption.getFileFooterKey())
.modifyIv(CryptoUtils.modifyIvForStream(name, 1));
}
return new OutStream(name, options, physicalWriter.createDataStream(name));
}
/**
* Get the stride rate of the row index.
*/
@Override
public int getRowIndexStride() {
return rowIndexStride;
}
/**
     * Should we be building the row index?
* @return true if we are building the index
*/
@Override
public boolean buildIndex() {
return buildIndex;
}
/**
* Is the ORC file compressed?
* @return are the streams compressed
*/
@Override
public boolean isCompressed() {
return unencryptedOptions.getCodec() != null;
}
/**
* Get the encoding strategy to use.
* @return encoding strategy
*/
@Override
public OrcFile.EncodingStrategy getEncodingStrategy() {
return encodingStrategy;
}
/**
* Get the bloom filter columns
* @return bloom filter columns
*/
@Override
public boolean[] getBloomFilterColumns() {
return bloomFilterColumns;
}
/**
* Get bloom filter false positive percentage.
* @return fpp
*/
@Override
public double getBloomFilterFPP() {
return bloomFilterFpp;
}
/**
* Get the writer's configuration.
* @return configuration
*/
@Override
public Configuration getConfiguration() {
return conf;
}
/**
* Get the version of the file to write.
*/
@Override
public OrcFile.Version getVersion() {
return version;
}
/**
* Get the PhysicalWriter.
*
* @return the file's physical writer.
*/
@Override
public PhysicalWriter getPhysicalWriter() {
return physicalWriter;
}
@Override
public OrcFile.BloomFilterVersion getBloomFilterVersion() {
return bloomFilterVersion;
}
@Override
public void writeIndex(StreamName name,
OrcProto.RowIndex.Builder index) throws IOException {
physicalWriter.writeIndex(name, index);
}
@Override
public void writeBloomFilter(StreamName name,
OrcProto.BloomFilterIndex.Builder bloom
) throws IOException {
physicalWriter.writeBloomFilter(name, bloom);
}
@Override
public WriterEncryptionVariant getEncryption(int columnId) {
return columnId < columnEncryption.length ?
columnEncryption[columnId] : null;
}
@Override
public DataMask getUnencryptedMask(int columnId) {
if (columnMaskDescriptions != null) {
MaskDescriptionImpl descr = columnMaskDescriptions[columnId];
if (descr != null) {
return DataMask.Factory.build(descr, schema.findSubtype(columnId),
(type) -> columnMaskDescriptions[type.getId()]);
}
}
return null;
}
@Override
public void setEncoding(int column, WriterEncryptionVariant encryption,
OrcProto.ColumnEncoding encoding) {
if (encryption == null) {
unencryptedEncodings.add(encoding);
} else {
encryption.addEncoding(encoding);
}
}
@Override
public void writeStatistics(StreamName name,
OrcProto.ColumnStatistics.Builder stats
) throws IOException {
physicalWriter.writeStatistics(name, stats);
}
@Override
public boolean getUseUTCTimestamp() {
return useUTCTimeZone;
}
@Override
public double getDictionaryKeySizeThreshold(int columnId) {
return directEncodingColumns[columnId] ? 0.0 : dictionaryKeySizeThreshold;
}
@Override
public boolean getProlepticGregorian() {
return useProlepticGregorian;
}
}
private static void writeTypes(OrcProto.Footer.Builder builder,
TypeDescription schema) {
builder.addAllTypes(OrcUtils.getOrcTypes(schema));
}
private void createRowIndexEntry() throws IOException {
treeWriter.createRowIndexEntry();
rowsInIndex = 0;
}
/**
* Write the encrypted keys into the StripeInformation along with the
* stripe id, so that the readers can decrypt the data.
* @param dirEntry the entry to modify
*/
private void addEncryptedKeys(OrcProto.StripeInformation.Builder dirEntry) {
for(WriterEncryptionVariant variant: encryption) {
dirEntry.addEncryptedLocalKeys(ByteString.copyFrom(
variant.getMaterial().getEncryptedKey()));
}
dirEntry.setEncryptStripeId(1 + stripes.size());
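    // Editorial note (not in the original source): stripe ids here are 1-based;
    // readers use this id to determine from which stripe onward the encrypted
    // local keys above apply.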
}
private void flushStripe() throws IOException {
if (buildIndex && rowsInIndex != 0) {
createRowIndexEntry();
}
if (rowsInStripe != 0) {
if (callback != null) {
callback.preStripeWrite(callbackContext);
}
// finalize the data for the stripe
int requiredIndexEntries = rowIndexStride == 0 ? 0 :
(int) ((rowsInStripe + rowIndexStride - 1) / rowIndexStride);
OrcProto.StripeFooter.Builder builder =
OrcProto.StripeFooter.newBuilder();
if (writeTimeZone) {
if (useUTCTimeZone) {
builder.setWriterTimezone("UTC");
} else {
builder.setWriterTimezone(TimeZone.getDefault().getID());
}
}
treeWriter.flushStreams();
treeWriter.writeStripe(requiredIndexEntries);
// update the encodings
builder.addAllColumns(unencryptedEncodings);
unencryptedEncodings.clear();
for (WriterEncryptionVariant writerEncryptionVariant : encryption) {
OrcProto.StripeEncryptionVariant.Builder encrypt =
OrcProto.StripeEncryptionVariant.newBuilder();
encrypt.addAllEncoding(writerEncryptionVariant.getEncodings());
writerEncryptionVariant.clearEncodings();
builder.addEncryption(encrypt);
}
OrcProto.StripeInformation.Builder dirEntry =
OrcProto.StripeInformation.newBuilder()
.setNumberOfRows(rowsInStripe);
if (encryption.length > 0 && needKeyFlush) {
addEncryptedKeys(dirEntry);
needKeyFlush = false;
}
physicalWriter.finalizeStripe(builder, dirEntry);
stripes.add(dirEntry.build());
rowCount += rowsInStripe;
rowsInStripe = 0;
}
}
private long computeRawDataSize() {
return treeWriter.getRawDataSize();
}
private OrcProto.CompressionKind writeCompressionKind(CompressionKind kind) {
switch (kind) {
case NONE: return OrcProto.CompressionKind.NONE;
case ZLIB: return OrcProto.CompressionKind.ZLIB;
case SNAPPY: return OrcProto.CompressionKind.SNAPPY;
case LZO: return OrcProto.CompressionKind.LZO;
case LZ4: return OrcProto.CompressionKind.LZ4;
case ZSTD: return OrcProto.CompressionKind.ZSTD;
default:
throw new IllegalArgumentException("Unknown compression " + kind);
}
}
private void writeMetadata() throws IOException {
// The physical writer now has the stripe statistics, so we pass a
// new builder in here.
physicalWriter.writeFileMetadata(OrcProto.Metadata.newBuilder());
}
private long writePostScript() throws IOException {
OrcProto.PostScript.Builder builder =
OrcProto.PostScript.newBuilder()
.setMagic(OrcFile.MAGIC)
.addVersion(version.getMajor())
.addVersion(version.getMinor())
.setWriterVersion(writerVersion.getId());
CompressionCodec codec = unencryptedOptions.getCodec();
if (codec == null) {
builder.setCompression(OrcProto.CompressionKind.NONE);
} else {
builder.setCompression(writeCompressionKind(codec.getKind()))
.setCompressionBlockSize(unencryptedOptions.getBufferSize());
}
return physicalWriter.writePostScript(builder);
}
private OrcProto.EncryptionKey.Builder writeEncryptionKey(WriterEncryptionKey key) {
OrcProto.EncryptionKey.Builder result = OrcProto.EncryptionKey.newBuilder();
HadoopShims.KeyMetadata meta = key.getMetadata();
result.setKeyName(meta.getKeyName());
result.setKeyVersion(meta.getVersion());
result.setAlgorithm(OrcProto.EncryptionAlgorithm.valueOf(
meta.getAlgorithm().getSerialization()));
return result;
}
private OrcProto.EncryptionVariant.Builder
writeEncryptionVariant(WriterEncryptionVariant variant) {
OrcProto.EncryptionVariant.Builder result =
OrcProto.EncryptionVariant.newBuilder();
result.setRoot(variant.getRoot().getId());
result.setKey(variant.getKeyDescription().getId());
result.setEncryptedKey(ByteString.copyFrom(variant.getMaterial().getEncryptedKey()));
return result;
}
private OrcProto.Encryption.Builder writeEncryptionFooter() {
OrcProto.Encryption.Builder encrypt = OrcProto.Encryption.newBuilder();
for(MaskDescriptionImpl mask: maskDescriptions.values()) {
OrcProto.DataMask.Builder maskBuilder = OrcProto.DataMask.newBuilder();
maskBuilder.setName(mask.getName());
for(String param: mask.getParameters()) {
maskBuilder.addMaskParameters(param);
}
for(TypeDescription column: mask.getColumns()) {
maskBuilder.addColumns(column.getId());
}
encrypt.addMask(maskBuilder);
}
for(WriterEncryptionKey key: keys.values()) {
encrypt.addKey(writeEncryptionKey(key));
}
for(WriterEncryptionVariant variant: encryption) {
encrypt.addVariants(writeEncryptionVariant(variant));
}
encrypt.setKeyProvider(OrcProto.KeyProviderKind.valueOf(
keyProvider.getKind().getValue()));
return encrypt;
}
private long writeFooter() throws IOException {
writeMetadata();
OrcProto.Footer.Builder builder = OrcProto.Footer.newBuilder();
builder.setNumberOfRows(rowCount);
builder.setRowIndexStride(rowIndexStride);
rawDataSize = computeRawDataSize();
// serialize the types
writeTypes(builder, schema);
builder.setCalendar(useProlepticGregorian
? OrcProto.CalendarKind.PROLEPTIC_GREGORIAN
: OrcProto.CalendarKind.JULIAN_GREGORIAN);
// add the stripe information
for(OrcProto.StripeInformation stripe: stripes) {
builder.addStripes(stripe);
}
// add the column statistics
treeWriter.writeFileStatistics();
// add all of the user metadata
for(Map.Entry<String, ByteString> entry: userMetadata.entrySet()) {
builder.addMetadata(OrcProto.UserMetadataItem.newBuilder()
.setName(entry.getKey()).setValue(entry.getValue()));
}
if (encryption.length > 0) {
builder.setEncryption(writeEncryptionFooter());
}
builder.setWriter(OrcFile.WriterImplementation.ORC_JAVA.getId());
builder.setSoftwareVersion(OrcUtils.getOrcVersion());
physicalWriter.writeFileFooter(builder);
return writePostScript();
}
@Override
public TypeDescription getSchema() {
return schema;
}
@Override
public void addUserMetadata(String name, ByteBuffer value) {
userMetadata.put(name, ByteString.copyFrom(value));
}
@Override
public void addRowBatch(VectorizedRowBatch batch) throws IOException {
try {
// If this is the first set of rows in this stripe, tell the tree writers
// to prepare the stripe.
if (batch.size != 0 && rowsInStripe == 0) {
treeWriter.prepareStripe(stripes.size() + 1);
}
if (buildIndex) {
// Batch the writes up to the rowIndexStride so that we can get the
// right size indexes.
int posn = 0;
while (posn < batch.size) {
int chunkSize = Math.min(batch.size - posn,
rowIndexStride - rowsInIndex);
if (batch.isSelectedInUse()) {
// find the longest chunk that is continuously selected from posn
for (int len = 1; len < chunkSize; ++len) {
if (batch.selected[posn + len] - batch.selected[posn] != len) {
chunkSize = len;
break;
}
}
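            // Worked example (editorial note, not in the original source): with
            // selected = {3, 4, 5, 9} the loop above trims the chunk to the
            // contiguous run 3..5 (chunkSize = 3); row 9 is then written as its
            // own chunk on the next pass of the outer loop.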
treeWriter.writeRootBatch(batch, batch.selected[posn], chunkSize);
} else {
treeWriter.writeRootBatch(batch, posn, chunkSize);
}
posn += chunkSize;
rowsInIndex += chunkSize;
rowsInStripe += chunkSize;
if (rowsInIndex >= rowIndexStride) {
createRowIndexEntry();
}
}
} else {
if (batch.isSelectedInUse()) {
int posn = 0;
while (posn < batch.size) {
int chunkSize = 1;
while (posn + chunkSize < batch.size) {
// find the longest chunk that is continuously selected from posn
if (batch.selected[posn + chunkSize] - batch.selected[posn] != chunkSize) {
break;
}
++chunkSize;
}
treeWriter.writeRootBatch(batch, batch.selected[posn], chunkSize);
posn += chunkSize;
}
} else {
treeWriter.writeRootBatch(batch, 0, batch.size);
}
rowsInStripe += batch.size;
}
rowsSinceCheck += batch.size;
previousAllocation = memoryManager.checkMemory(previousAllocation, this);
checkMemory();
} catch (Throwable t) {
try {
close();
} catch (Throwable ignore) {
// ignore
}
if (t instanceof IOException) {
throw (IOException) t;
} else {
throw new IOException("Problem adding row to " + path, t);
}
}
}
@Override
public void close() throws IOException {
if (!isClose) {
try {
if (callback != null) {
callback.preFooterWrite(callbackContext);
}
// remove us from the memory manager so that we don't get any callbacks
memoryManager.removeWriter(path);
// actually close the file
flushStripe();
lastFlushOffset = writeFooter();
physicalWriter.close();
} finally {
isClose = true;
}
}
}
/**
   * Raw data size will be computed when writing the file footer. Hence the raw
   * data size value will be available only after closing the writer.
*/
@Override
public long getRawDataSize() {
return rawDataSize;
}
/**
   * Row count gets updated when flushing the stripes. To get an accurate row
   * count, call this method after the writer is closed.
*/
@Override
public long getNumberOfRows() {
return rowCount;
}
@Override
public long writeIntermediateFooter() throws IOException {
// flush any buffered rows
flushStripe();
// write a footer
if (stripesAtLastFlush != stripes.size()) {
if (callback != null) {
callback.preFooterWrite(callbackContext);
}
lastFlushOffset = writeFooter();
stripesAtLastFlush = stripes.size();
physicalWriter.flush();
}
return lastFlushOffset;
}
private static void checkArgument(boolean expression, String message) {
if (!expression) {
throw new IllegalArgumentException(message);
}
}
@Override
public void appendStripe(byte[] stripe, int offset, int length,
StripeInformation stripeInfo,
OrcProto.StripeStatistics stripeStatistics
) throws IOException {
appendStripe(stripe, offset, length, stripeInfo,
new StripeStatistics[]{
new StripeStatisticsImpl(schema, stripeStatistics.getColStatsList(),
false, false)});
}
@Override
public void appendStripe(byte[] stripe, int offset, int length,
StripeInformation stripeInfo,
StripeStatistics[] stripeStatistics
) throws IOException {
checkArgument(stripe != null, "Stripe must not be null");
checkArgument(length <= stripe.length,
"Specified length must not be greater specified array length");
checkArgument(stripeInfo != null, "Stripe information must not be null");
checkArgument(stripeStatistics != null,
"Stripe statistics must not be null");
// If we have buffered rows, flush them
if (rowsInStripe > 0) {
flushStripe();
}
rowsInStripe = stripeInfo.getNumberOfRows();
// update stripe information
OrcProto.StripeInformation.Builder dirEntry =
OrcProto.StripeInformation.newBuilder()
.setNumberOfRows(rowsInStripe)
.setIndexLength(stripeInfo.getIndexLength())
.setDataLength(stripeInfo.getDataLength())
.setFooterLength(stripeInfo.getFooterLength());
// If this is the first stripe of the original file, we need to copy the
// encryption information.
if (stripeInfo.hasEncryptionStripeId()) {
dirEntry.setEncryptStripeId(stripeInfo.getEncryptionStripeId());
for(byte[] key: stripeInfo.getEncryptedLocalKeys()) {
dirEntry.addEncryptedLocalKeys(ByteString.copyFrom(key));
}
}
physicalWriter.appendRawStripe(ByteBuffer.wrap(stripe, offset, length),
dirEntry);
// since we have already written the stripe, just update stripe statistics
treeWriter.addStripeStatistics(stripeStatistics);
stripes.add(dirEntry.build());
// reset it after writing the stripe
rowCount += rowsInStripe;
rowsInStripe = 0;
needKeyFlush = encryption.length > 0;
}
@Override
public void appendUserMetadata(List<OrcProto.UserMetadataItem> userMetadata) {
if (userMetadata != null) {
for (OrcProto.UserMetadataItem item : userMetadata) {
this.userMetadata.put(item.getName(), item.getValue());
}
}
}
@Override
public ColumnStatistics[] getStatistics() {
// get the column statistics
final ColumnStatistics[] result =
new ColumnStatistics[schema.getMaximumId() + 1];
// Get the file statistics, preferring the encrypted one.
treeWriter.getCurrentStatistics(result);
return result;
}
@Override
public List<StripeInformation> getStripes() throws IOException {
return Collections.unmodifiableList(OrcUtils.convertProtoStripesToStripes(stripes));
}
public CompressionCodec getCompressionCodec() {
return unencryptedOptions.getCodec();
}
private static boolean hasTimestamp(TypeDescription schema) {
if (schema.getCategory() == TypeDescription.Category.TIMESTAMP) {
return true;
}
List<TypeDescription> children = schema.getChildren();
if (children != null) {
for (TypeDescription child : children) {
if (hasTimestamp(child)) {
return true;
}
}
}
return false;
}
private WriterEncryptionKey getKey(String keyName,
KeyProvider provider) throws IOException {
WriterEncryptionKey result = keys.get(keyName);
if (result == null) {
result = new WriterEncryptionKey(provider.getCurrentKeyVersion(keyName));
keys.put(keyName, result);
}
return result;
}
private MaskDescriptionImpl getMask(String maskString) {
// if it is already there, get the earlier object
MaskDescriptionImpl result = maskDescriptions.get(maskString);
if (result == null) {
result = ParserUtils.buildMaskDescription(maskString);
maskDescriptions.put(maskString, result);
}
return result;
}
private int visitTypeTree(TypeDescription schema,
boolean encrypted,
KeyProvider provider) throws IOException {
int result = 0;
String keyName = schema.getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE);
String maskName = schema.getAttributeValue(TypeDescription.MASK_ATTRIBUTE);
if (keyName != null) {
if (provider == null) {
throw new IllegalArgumentException("Encryption requires a KeyProvider.");
}
if (encrypted) {
throw new IllegalArgumentException("Nested encryption type: " + schema);
}
encrypted = true;
result += 1;
WriterEncryptionKey key = getKey(keyName, provider);
HadoopShims.KeyMetadata metadata = key.getMetadata();
WriterEncryptionVariant variant = new WriterEncryptionVariant(key,
schema, provider.createLocalKey(metadata));
key.addRoot(variant);
}
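    // Editorial note (not in the original source): a mask is recorded for the
    // root of each encrypted subtree and for any column that names its own mask;
    // when no mask is given, the unencrypted copy falls back to the "nullify"
    // mask.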
if (encrypted && (keyName != null || maskName != null)) {
MaskDescriptionImpl mask = getMask(maskName == null ? "nullify" : maskName);
mask.addColumn(schema);
}
List<TypeDescription> children = schema.getChildren();
if (children != null) {
for(TypeDescription child: children) {
result += visitTypeTree(child, encrypted, provider);
}
}
return result;
}
/**
* Iterate through the encryption options given by the user and set up
* our data structures.
* @param provider the KeyProvider to use to generate keys
* @param schema the type tree that we search for annotations
* @param keyOverrides user specified key overrides
*/
private WriterEncryptionVariant[] setupEncryption(
KeyProvider provider,
TypeDescription schema,
Map<String, HadoopShims.KeyMetadata> keyOverrides) throws IOException {
keyProvider = provider != null ? provider :
CryptoUtils.getKeyProvider(conf, new SecureRandom());
// Load the overrides into the cache so that we use the required key versions.
for(HadoopShims.KeyMetadata key: keyOverrides.values()) {
keys.put(key.getKeyName(), new WriterEncryptionKey(key));
}
int variantCount = visitTypeTree(schema, false, keyProvider);
// Now that we have de-duped the keys and maskDescriptions, make the arrays
int nextId = 0;
if (variantCount > 0) {
for (MaskDescriptionImpl mask : maskDescriptions.values()) {
mask.setId(nextId++);
for (TypeDescription column : mask.getColumns()) {
this.columnMaskDescriptions[column.getId()] = mask;
}
}
}
nextId = 0;
int nextVariantId = 0;
WriterEncryptionVariant[] result = new WriterEncryptionVariant[variantCount];
for(WriterEncryptionKey key: keys.values()) {
key.setId(nextId++);
key.sortRoots();
for(WriterEncryptionVariant variant: key.getEncryptionRoots()) {
result[nextVariantId] = variant;
columnEncryption[variant.getRoot().getId()] = variant;
variant.setId(nextVariantId++);
}
}
return result;
}
@Override
public long estimateMemory() {
return this.treeWriter.estimateMemory();
}
}
| 36,027 | 34.046693 | 89 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/WriterInternal.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.Writer;
/**
* The ORC internal API to the writer.
*/
public interface WriterInternal extends Writer {
/**
* Increase the buffer size for this writer.
* This function is internal only and should only be called by the
* ORC file merger.
* @param newSize the new buffer size.
*/
void increaseCompressionSize(int newSize);
}
| 1,204 | 31.567568 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/ZlibCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;
public class ZlibCodec implements CompressionCodec, DirectDecompressionCodec {
private static final HadoopShims SHIMS = HadoopShimsFactory.get();
// Note: shim path does not care about levels and strategies (only used for decompression).
private HadoopShims.DirectDecompressor decompressShim = null;
private Boolean direct = null;
static class ZlibOptions implements Options {
private int level;
private int strategy;
private final boolean FIXED;
ZlibOptions(int level, int strategy, boolean fixed) {
this.level = level;
this.strategy = strategy;
FIXED = fixed;
}
@Override
public ZlibOptions copy() {
return new ZlibOptions(level, strategy, false);
}
@Override
public ZlibOptions setSpeed(SpeedModifier newValue) {
if (FIXED) {
throw new IllegalStateException("Attempt to modify the default options");
}
switch (newValue) {
case FAST:
// deflate_fast looking for 16 byte patterns
level = Deflater.BEST_SPEED + 1;
break;
case DEFAULT:
// deflate_slow looking for 128 byte patterns
level = Deflater.DEFAULT_COMPRESSION;
break;
case FASTEST:
// deflate_fast looking for 8 byte patterns
level = Deflater.BEST_SPEED;
break;
default:
break;
}
return this;
}
@Override
public ZlibOptions setData(DataKind newValue) {
if (FIXED) {
throw new IllegalStateException("Attempt to modify the default options");
}
switch (newValue) {
case BINARY:
/* filtered == less LZ77, more huffman */
strategy = Deflater.FILTERED;
break;
case TEXT:
strategy = Deflater.DEFAULT_STRATEGY;
break;
default:
break;
}
return this;
}
@Override
public boolean equals(Object other) {
if (other == null || getClass() != other.getClass()) {
return false;
} else if (this == other) {
return true;
} else {
ZlibOptions otherOpts = (ZlibOptions) other;
return level == otherOpts.level && strategy == otherOpts.strategy;
}
}
@Override
public int hashCode() {
return level + strategy * 101;
}
}
private static final ZlibOptions DEFAULT_OPTIONS =
new ZlibOptions(Deflater.DEFAULT_COMPRESSION, Deflater.DEFAULT_STRATEGY, true);
@Override
public Options getDefaultOptions() {
return DEFAULT_OPTIONS;
}
@Override
public boolean compress(ByteBuffer in, ByteBuffer out,
ByteBuffer overflow,
Options options) {
ZlibOptions zlo = (ZlibOptions) options;
int length = in.remaining();
int outSize = 0;
Deflater deflater = new Deflater(zlo.level, true);
try {
deflater.setStrategy(zlo.strategy);
deflater.setInput(in.array(), in.arrayOffset() + in.position(), length);
deflater.finish();
int offset = out.arrayOffset() + out.position();
while (!deflater.finished() && (length > outSize)) {
int size = deflater.deflate(out.array(), offset, out.remaining());
out.position(size + out.position());
outSize += size;
offset += size;
// if we run out of space in the out buffer, use the overflow
if (out.remaining() == 0) {
if (overflow == null) {
return false;
}
out = overflow;
offset = out.arrayOffset() + out.position();
}
}
} finally {
deflater.end();
}
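    // Editorial note (not in the original source): success is reported only when
    // the compressed output is strictly smaller than the input; otherwise the
    // caller keeps the original, uncompressed bytes.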
return length > outSize;
}
@Override
public void decompress(ByteBuffer in, ByteBuffer out) throws IOException {
if(in.isDirect() && out.isDirect()) {
directDecompress(in, out);
return;
}
Inflater inflater = new Inflater(true);
try {
inflater.setInput(in.array(), in.arrayOffset() + in.position(),
in.remaining());
while (!(inflater.finished() || inflater.needsDictionary() ||
inflater.needsInput())) {
try {
int count = inflater.inflate(out.array(),
out.arrayOffset() + out.position(),
out.remaining());
out.position(count + out.position());
} catch (DataFormatException dfe) {
throw new IOException("Bad compression data", dfe);
}
}
out.flip();
} finally {
inflater.end();
}
in.position(in.limit());
}
@Override
public boolean isAvailable() {
if (direct == null) {
// see nowrap option in new Inflater(boolean) which disables zlib headers
try {
ensureShim();
direct = (decompressShim != null);
} catch (UnsatisfiedLinkError ule) {
direct = false;
}
}
return direct;
}
private void ensureShim() {
if (decompressShim == null) {
decompressShim = SHIMS.getDirectDecompressor(
HadoopShims.DirectCompressionType.ZLIB_NOHEADER);
}
}
@Override
public void directDecompress(ByteBuffer in, ByteBuffer out) throws IOException {
ensureShim();
decompressShim.decompress(in, out);
out.flip(); // flip for read
}
@Override
public void reset() {
if (decompressShim != null) {
decompressShim.reset();
}
}
@Override
public void destroy() {
if (decompressShim != null) {
decompressShim.end();
}
}
@Override
public CompressionKind getKind() {
return CompressionKind.ZLIB;
}
@Override
public void close() {
OrcCodecPool.returnCodec(CompressionKind.ZLIB, this);
}
}
| 6,787 | 27.885106 | 93 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/AndFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.orc.OrcFilterContext;
public class AndFilter implements VectorFilter {
public final VectorFilter[] filters;
private final Selected andBound = new Selected();
private final Selected andOut = new Selected();
public AndFilter(VectorFilter[] filters) {
this.filters = filters;
}
@Override
public void filter(OrcFilterContext fc,
Selected bound,
Selected selOut) {
    // Each filter restricts the current selection. Make a copy of the current
    // selection; it is used as the bound for the next filter and finally copied
    // into selOut.
andBound.set(bound);
andOut.ensureSize(bound.selSize);
for (VectorFilter f : filters) {
andOut.clear();
f.filter(fc, andBound, andOut);
// Make the current selection the bound for the next filter in AND
andBound.set(andOut);
}
selOut.set(andOut);
}
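  // Illustrative note added for this edit: with filters {x > 1, y < 10} and an
  // incoming bound of rows {0..3}, the first filter might narrow the selection
  // to {1, 3}; that result becomes the bound for the second filter, which can
  // only shrink it further before it is copied into selOut.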
}
| 1,745 | 34.632653 | 98 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/BatchFilterFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.filter.BatchFilter;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
/**
 * Provides an abstraction layer between the VectorFilters and the
 * {@code Consumer<OrcFilterContext>}.
*/
class BatchFilterFactory {
static BatchFilter create(List<BatchFilter> filters) {
if (filters.isEmpty()) {
return null;
} else if (filters.size() == 1) {
return filters.get(0);
} else {
return new AndBatchFilterImpl(filters.toArray(new BatchFilter[0]));
}
}
static BatchFilter create(Consumer<OrcFilterContext> filter,
String[] colNames) {
return filter instanceof BatchFilter ? (BatchFilter) filter
: new WrappedFilterImpl(filter, colNames);
}
static BatchFilter create(VectorFilter filter, String[] colNames) {
return new BatchFilterImpl(filter, colNames);
}
/**
   * Used to wrap a VectorFilter so it can be applied by the BatchReader
*/
private static class BatchFilterImpl implements BatchFilter {
final VectorFilter filter;
private final String[] colNames;
private final Selected bound = new Selected();
private final Selected selOut = new Selected();
private BatchFilterImpl(VectorFilter filter, String[] colNames) {
this.filter = filter;
this.colNames = colNames;
}
@Override
public void accept(OrcFilterContext fc) {
// Define the bound to be the batch size
bound.initialize(fc);
// selOut is set to the selectedVector
selOut.sel = fc.getSelected();
selOut.selSize = 0;
filter.filter(fc, bound, selOut);
if (selOut.selSize < fc.getSelectedSize()) {
fc.setSelectedSize(selOut.selSize);
fc.setSelectedInUse(true);
} else if (selOut.selSize > fc.getSelectedSize()) {
throw new RuntimeException(
String.format("Unexpected state: Filtered size %s > input size %s",
selOut.selSize, fc.getSelectedSize()));
}
}
@Override
public String[] getColumnNames() {
return colNames;
}
}
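  /**
   * Applies the child BatchFilters in sequence, stopping as soon as the selection becomes empty.
   * The column names are the union of the children's column names.
   */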
static class AndBatchFilterImpl implements BatchFilter {
private final BatchFilter[] filters;
private final String[] colNames;
AndBatchFilterImpl(BatchFilter... filters) {
this.filters = filters;
Set<String> names = new HashSet<>();
for (BatchFilter filter : this.filters) {
names.addAll(Arrays.asList(filter.getColumnNames()));
}
this.colNames = names.toArray(new String[0]);
}
@Override
public void accept(OrcFilterContext fc) {
for (int i = 0; fc.getSelectedSize() > 0 && i < filters.length; i++) {
filters[i].accept(fc);
}
}
@Override
public String[] getColumnNames() {
return colNames;
}
}
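  /**
   * Wraps a plain Consumer of OrcFilterContext so that it can be used as a BatchFilter with the
   * supplied column names.
   */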
private static class WrappedFilterImpl implements BatchFilter {
private final Consumer<OrcFilterContext> filter;
private final String[] colNames;
private WrappedFilterImpl(Consumer<OrcFilterContext> filter, String[] colNames) {
this.filter = filter;
this.colNames = colNames;
}
@Override
public String[] getColumnNames() {
return colNames;
}
@Override
public void accept(OrcFilterContext filterContext) {
filter.accept(filterContext);
}
}
}
| 4,259 | 29.212766 | 85 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/FilterFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.filter.BatchFilter;
import org.apache.orc.filter.PluginFilterService;
import org.apache.orc.impl.filter.leaf.LeafFilterFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.ServiceLoader;
import java.util.Set;
public class FilterFactory {
private static final Logger LOG = LoggerFactory.getLogger(FilterFactory.class);
/**
   * Create a BatchFilter. This considers the input filter callback, any plugin filters and the
   * SearchArgument filter. If more than one is available then they are combined by AND.
*
* @param opts for reading the file
* @param readSchema that should be used
* @param isSchemaCaseAware identifies if the schema is case-sensitive
* @param version provides the ORC file version
* @param normalize identifies if the SArg should be normalized or not
* @param filePath that is fully qualified to determine plugin filter(s)
* @param conf configuration shared when determining Plugin filter(s)
   * @return BatchFilter that combines the configured filters, or null if none apply
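   *
   * <p>Example (sketch; assumes an already configured {@code Reader.Options opts}, a
   * {@code Configuration conf}, the read schema and the fully qualified file path):
   * <pre>{@code
   *   BatchFilter filter = FilterFactory.createBatchFilter(
   *       opts, readSchema, true, OrcFile.Version.CURRENT, false, filePath, conf);
   *   if (filter != null) {
   *     // filterContext is an OrcFilterContext wrapping the current batch
   *     filter.accept(filterContext);   // updates the selected vector of the batch
   *   }
   * }</pre>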
*/
public static BatchFilter createBatchFilter(Reader.Options opts,
TypeDescription readSchema,
boolean isSchemaCaseAware,
OrcFile.Version version,
boolean normalize,
String filePath,
Configuration conf) {
List<BatchFilter> filters = new ArrayList<>(2);
// 1. Process input filter
if (opts.getFilterCallback() != null) {
filters.add(BatchFilterFactory.create(opts.getFilterCallback(),
opts.getPreFilterColumnNames()));
}
// 2. Process PluginFilter
if (opts.allowPluginFilters()) {
List<BatchFilter> pluginFilters = findPluginFilters(filePath, conf);
pluginFilters = getAllowedFilters(pluginFilters, opts.pluginAllowListFilters());
if (!pluginFilters.isEmpty()) {
LOG.debug("Added plugin filters {} to the read", pluginFilters);
filters.addAll(pluginFilters);
}
}
// 3. Process SArgFilter
if (opts.isAllowSARGToFilter() && opts.getSearchArgument() != null) {
SearchArgument sArg = opts.getSearchArgument();
Set<String> colNames = new HashSet<>();
try {
ExpressionTree exprTree = normalize ? sArg.getExpression() : sArg.getCompactExpression();
LOG.debug("normalize={}, using expressionTree={}", normalize, exprTree);
filters.add(BatchFilterFactory.create(createSArgFilter(exprTree,
colNames,
sArg.getLeaves(),
readSchema,
isSchemaCaseAware,
version),
colNames.toArray(new String[0])));
} catch (UnSupportedSArgException e) {
LOG.warn("SArg: {} is not supported\n{}", sArg, e.getMessage());
}
}
return BatchFilterFactory.create(filters);
}
public static VectorFilter createSArgFilter(ExpressionTree expr,
Set<String> colIds,
List<PredicateLeaf> leaves,
TypeDescription readSchema,
boolean isSchemaCaseAware,
OrcFile.Version version)
throws UnSupportedSArgException {
VectorFilter result;
switch (expr.getOperator()) {
case OR:
VectorFilter[] orFilters = new VectorFilter[expr.getChildren().size()];
for (int i = 0; i < expr.getChildren().size(); i++) {
orFilters[i] = createSArgFilter(expr.getChildren().get(i),
colIds,
leaves,
readSchema,
isSchemaCaseAware,
version);
}
result = new OrFilter(orFilters);
break;
case AND:
VectorFilter[] andFilters = new VectorFilter[expr.getChildren().size()];
for (int i = 0; i < expr.getChildren().size(); i++) {
andFilters[i] = createSArgFilter(expr.getChildren().get(i),
colIds,
leaves,
readSchema,
isSchemaCaseAware,
version);
}
result = new AndFilter(andFilters);
break;
case NOT:
        // NOT is expected to be pushed down so that it only occurs on leaf filters
ExpressionTree leaf = expr.getChildren().get(0);
assert leaf.getOperator() == ExpressionTree.Operator.LEAF;
result = LeafFilterFactory.createLeafVectorFilter(leaves.get(leaf.getLeaf()),
colIds,
readSchema,
isSchemaCaseAware,
version,
true);
break;
case LEAF:
result = LeafFilterFactory.createLeafVectorFilter(leaves.get(expr.getLeaf()),
colIds,
readSchema,
isSchemaCaseAware,
version,
false);
break;
default:
throw new UnSupportedSArgException(String.format("SArg expression: %s is not supported",
expr));
}
return result;
}
public static class UnSupportedSArgException extends Exception {
public UnSupportedSArgException(String message) {
super(message);
}
}
/**
* Find filter(s) for a given file path. The order in which the filter services are invoked is
* unpredictable.
*
* @param filePath fully qualified path of the file being evaluated
* @param conf reader configuration of ORC, can be used to configure the filter services
* @return The plugin filter(s) matching the given file, can be empty if none are found
*/
static List<BatchFilter> findPluginFilters(String filePath, Configuration conf) {
List<BatchFilter> filters = new ArrayList<>();
for (PluginFilterService s : ServiceLoader.load(PluginFilterService.class)) {
LOG.debug("Processing filter service {}", s);
BatchFilter filter = s.getFilter(filePath, conf);
if (filter != null) {
filters.add(filter);
}
}
return filters;
}
/**
   * Retain only the BatchFilters whose class names appear in the allowList.
   *
   * @param filters the full BatchFilter list loaded from the class path.
   * @param allowList the list of allowed class names; "*" allows all filters.
*/
private static List<BatchFilter> getAllowedFilters(
List<BatchFilter> filters, List<String> allowList) {
List<BatchFilter> allowBatchFilters = new ArrayList<>();
if (allowList != null && allowList.contains("*")) {
return filters;
}
if (allowList == null || allowList.isEmpty() || filters == null) {
LOG.debug("Disable all PluginFilter.");
return allowBatchFilters;
}
for (BatchFilter filter: filters) {
if (allowList.contains(filter.getClass().getName())) {
allowBatchFilters.add(filter);
} else {
LOG.debug("Ignoring filter service {}", filter);
}
}
return allowBatchFilters;
}
}
| 9,446 | 42.334862 | 97 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/IsNotNullFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.OrcFilterContext;
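/**
 * A leaf filter that selects the rows where the column value is not null.
 */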
public class IsNotNullFilter implements VectorFilter {
private final String colName;
public IsNotNullFilter(String colName) {
this.colName = colName;
}
@Override
public void filter(OrcFilterContext fc,
Selected bound,
Selected selOut) {
ColumnVector[] branch = fc.findColumnVector(colName);
ColumnVector v = branch[branch.length - 1];
boolean noNulls = OrcFilterContext.noNulls(branch);
if (noNulls || (v.isRepeating && !OrcFilterContext.isNull(branch, 0))) {
// In case we don't have any nulls, then irrespective of the repeating status, select all the
// values
selOut.selectAll(bound);
} else if (!v.isRepeating) {
int currSize = 0;
int rowIdx;
      // This branch has at least one null and the vector is not repeating, so each row must be
      // checked. (A repeating vector would have a null repeating value here, selecting nothing.)
for (int i = 0; i < bound.selSize; i++) {
rowIdx = bound.sel[i];
// Select if the value is not null
if (!OrcFilterContext.isNull(branch, rowIdx)) {
selOut.sel[currSize++] = rowIdx;
}
}
selOut.selSize = currSize;
}
}
}
| 2,135 | 34.016393 | 99 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/IsNullFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.OrcFilterContext;
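/**
 * A leaf filter that selects the rows where the column value is null.
 */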
public class IsNullFilter implements VectorFilter {
private final String colName;
public IsNullFilter(String colName) {
this.colName = colName;
}
@Override
public void filter(OrcFilterContext fc,
Selected bound,
Selected selOut) {
ColumnVector[] branch = fc.findColumnVector(colName);
ColumnVector v = branch[branch.length - 1];
boolean noNulls = OrcFilterContext.noNulls(branch);
    // If the vector has no nulls then no rows satisfy IS NULL and there is nothing to do
if (!noNulls) {
if (v.isRepeating && OrcFilterContext.isNull(branch, 0)) {
// If the repeating vector is null then set all as selected.
selOut.selectAll(bound);
} else {
int currSize = 0;
int rowIdx;
for (int i = 0; i < bound.selSize; i++) {
// Identify the rowIdx from the selected vector
rowIdx = bound.sel[i];
if (OrcFilterContext.isNull(branch, rowIdx)) {
selOut.sel[currSize++] = rowIdx;
}
}
selOut.selSize = currSize;
}
}
}
}
| 2,056 | 32.721311 | 89 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/LeafFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.OrcFilterContext;
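/**
 * Base class for the leaf predicate filters such as equals, in and less-than. It handles nulls,
 * repeating vectors and negation, and delegates the per-row check to the allow method.
 */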
public abstract class LeafFilter implements VectorFilter {
public String getColName() {
return colName;
}
private final String colName;
private final boolean negated;
protected LeafFilter(String colName, boolean negated) {
this.colName = colName;
this.negated = negated;
}
@Override
public void filter(OrcFilterContext fc,
Selected bound,
Selected selOut) {
ColumnVector[] branch = fc.findColumnVector(colName);
ColumnVector v = branch[branch.length - 1];
boolean noNulls = OrcFilterContext.noNulls(branch);
int currSize = 0;
int rowIdx;
if (v.isRepeating) {
if (!OrcFilterContext.isNull(branch, 0) && allowWithNegation(v, 0)) {
// If the repeating value is allowed then allow the current selSize
for (int i = 0; i < bound.selSize; i++) {
rowIdx = bound.sel[i];
selOut.sel[currSize++] = rowIdx;
}
}
} else if (noNulls) {
for (int i = 0; i < bound.selSize; i++) {
rowIdx = bound.sel[i];
// Check the value
if (allowWithNegation(v, rowIdx)) {
selOut.sel[currSize++] = rowIdx;
}
}
} else {
for (int i = 0; i < bound.selSize; i++) {
rowIdx = bound.sel[i];
// Check the value only if not null
if (!OrcFilterContext.isNull(branch, rowIdx) &&
allowWithNegation(v, rowIdx)) {
selOut.sel[currSize++] = rowIdx;
}
}
}
selOut.selSize = currSize;
}
private boolean allowWithNegation(ColumnVector v, int rowIdx) {
return allow(v, rowIdx) != negated;
}
protected abstract boolean allow(ColumnVector v, int rowIdx);
}
| 2,653 | 30.223529 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/OrFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.orc.OrcFilterContext;
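/**
 * A filter that ORs the results of its child filters. Rows selected by an earlier child are
 * removed from the bound so later children do not evaluate them again.
 */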
public class OrFilter implements VectorFilter {
public final VectorFilter[] filters;
private final Selected orOut = new Selected();
private final Selected orBound = new Selected();
public OrFilter(VectorFilter[] filters) {
this.filters = filters;
}
@Override
public void filter(OrcFilterContext fc,
Selected bound,
Selected selOut) {
orOut.ensureSize(bound.selSize);
orBound.set(bound);
for (VectorFilter f : filters) {
// In case of OR since we have to add to existing output, pass the out as empty
orOut.clear();
f.filter(fc, orBound, orOut);
// During an OR operation the size cannot decrease, merge the current selections into selOut
selOut.unionDisjoint(orOut);
// Remove these from the bound as they don't need any further evaluation
orBound.minus(orOut);
}
}
}
| 1,776 | 34.54 | 98 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/Selected.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.orc.OrcFilterContext;
/**
* Wrapper class for the selected vector that centralizes the convenience functions
*/
public class Selected {
// Sorted array of row indices
int[] sel;
int selSize;
Selected(int[] sel) {
this.sel = sel;
this.selSize = 0;
}
Selected() {
this(new int[1024]);
}
void clear() {
this.selSize = 0;
}
void selectAll(Selected src) {
System.arraycopy(src.sel, 0, this.sel, 0, src.selSize);
this.selSize = src.selSize;
}
/**
* Initialize the selected vector from the supplied filter context
*
* @param fc Input filterContext
*/
void initialize(OrcFilterContext fc) {
ensureSize(fc.getSelectedSize());
selSize = fc.getSelectedSize();
if (fc.isSelectedInUse()) {
System.arraycopy(fc.getSelected(), 0, sel, 0, selSize);
} else {
for (int i = 0; i < selSize; i++) {
sel[i] = i;
}
}
}
/**
   * Only adjust the size and don't worry about the state; if required, that is handled before
   * this is called.
*
* @param size Desired size
*/
void ensureSize(int size) {
if (size > sel.length) {
sel = new int[size];
selSize = 0;
}
}
void set(Selected inBound) {
ensureSize(inBound.selSize);
System.arraycopy(inBound.sel, 0, sel, 0, inBound.selSize);
selSize = inBound.selSize;
}
/**
   * Expects the elements of src to be disjoint with respect to this; this is not validated.
*
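   * For example, merging src = [2, 4] into this = [1, 5, 9] results in this = [1, 2, 4, 5, 9].
   *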
* @param src The disjoint selection indices that should be merged into this.
*/
void unionDisjoint(Selected src) {
// merge from the back to avoid the need for an intermediate store
int writeIdx = src.selSize + this.selSize - 1;
int srcIdx = src.selSize - 1;
int thisIdx = this.selSize - 1;
while (thisIdx >= 0 || srcIdx >= 0) {
if (srcIdx < 0 || (thisIdx >= 0 && src.sel[srcIdx] < this.sel[thisIdx])) {
// src is exhausted or this is larger
this.sel[writeIdx--] = this.sel[thisIdx--];
} else {
this.sel[writeIdx--] = src.sel[srcIdx--];
}
}
this.selSize += src.selSize;
}
/**
* Remove the elements of src from this.
*
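   * For example, removing src = [2, 9] from this = [1, 2, 5, 9] leaves this = [1, 5].
   *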
* @param src The selection indices that should be removed from the current selection.
*/
  void minus(Selected src) {
    int writeIdx = 0;
    int evalIdx = 0;
    int srcIdx = 0;
    while (srcIdx < src.selSize && evalIdx < this.selSize) {
      if (this.sel[evalIdx] < src.sel[srcIdx]) {
        // Evaluation is smaller so retain it
        this.sel[writeIdx] = this.sel[evalIdx];
        evalIdx += 1;
        writeIdx += 1;
      } else if (this.sel[evalIdx] > src.sel[srcIdx]) {
        // Evaluation is larger, cannot decide yet; advance src
        srcIdx += 1;
      } else {
        // Equal values are dropped, so move both evalIdx and srcIdx forward
        evalIdx += 1;
        srcIdx += 1;
      }
    }
    if (evalIdx < this.selSize) {
      // Retain the tail that has no counterpart left in src
      System.arraycopy(this.sel, evalIdx, this.sel, writeIdx, this.selSize - evalIdx);
      writeIdx += this.selSize - evalIdx;
    }
    this.selSize = writeIdx;
  }
}
| 3,969 | 27.357143 | 93 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/VectorFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.orc.OrcFilterContext;
/**
* A filter that operates on the supplied
* {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} and updates the selections.
* <p>
* This is the interface that is the basis of both the leaf filters such as Equals, In and logical
* filters such as And, Or and Not
*/
public interface VectorFilter {
/**
* Filter the vectorized row batch that is wrapped into the FilterContext.
* @param fc The filter context that wraps the VectorizedRowBatch
   * @param bound The bound of the scan. The filter is expected to operate only on the rows
   *              scoped by the bound, changing their selection status, and to leave the bound
   *              itself unchanged.
* @param selOut The filter should update the selOut for the elements scoped by bound. The selOut
* should be sorted in ascending order
*/
void filter(OrcFilterContext fc, Selected bound, Selected selOut);
}
| 1,848 | 42 | 99 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/leaf/DecimalFilters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.filter.LeafFilter;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
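/**
 * Leaf filters for DECIMAL columns that are read into a DecimalColumnVector as
 * HiveDecimalWritable values.
 */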
class DecimalFilters {
private DecimalFilters() {
}
static class DecimalBetween extends LeafFilter {
private final HiveDecimalWritable low;
private final HiveDecimalWritable high;
DecimalBetween(String colName, Object low, Object high, boolean negated) {
super(colName, negated);
this.low = (HiveDecimalWritable) low;
this.high = (HiveDecimalWritable) high;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DecimalColumnVector) v).vector[rowIdx].compareTo(low) >= 0 &&
((DecimalColumnVector) v).vector[rowIdx].compareTo(high) <= 0;
}
}
static class DecimalEquals extends LeafFilter {
private final HiveDecimalWritable aValue;
DecimalEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (HiveDecimalWritable) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DecimalColumnVector) v).vector[rowIdx].compareTo(aValue) == 0;
}
}
static class DecimalIn extends LeafFilter {
private final Set<HiveDecimalWritable> inValues;
DecimalIn(String colName, List<Object> values, boolean negated) {
super(colName, negated);
inValues = new HashSet<>(values.size());
for (Object value : values) {
inValues.add((HiveDecimalWritable) value);
}
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return inValues.contains(((DecimalColumnVector) v).vector[rowIdx]);
}
}
static class DecimalLessThan extends LeafFilter {
private final HiveDecimalWritable aValue;
DecimalLessThan(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (HiveDecimalWritable) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DecimalColumnVector) v).vector[rowIdx].compareTo(aValue) < 0;
}
}
static class DecimalLessThanEquals extends LeafFilter {
private final HiveDecimalWritable aValue;
DecimalLessThanEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (HiveDecimalWritable) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DecimalColumnVector) v).vector[rowIdx].compareTo(aValue) <= 0;
}
}
}
| 3,562 | 31.390909 | 78 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/leaf/FloatFilters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.orc.impl.filter.LeafFilter;
import java.util.Arrays;
import java.util.List;
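/**
 * Leaf filters for floating point columns that are read into a DoubleColumnVector.
 */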
class FloatFilters {
private FloatFilters() {
}
static class FloatBetween extends LeafFilter {
private final double low;
private final double high;
FloatBetween(String colName, Object low, Object high, boolean negated) {
super(colName, negated);
this.low = (double) low;
this.high = (double) high;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DoubleColumnVector) v).vector[rowIdx] >= low &&
((DoubleColumnVector) v).vector[rowIdx] <= high;
}
}
static class FloatEquals extends LeafFilter {
private final double aValue;
FloatEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (double) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DoubleColumnVector) v).vector[rowIdx] == aValue;
}
}
static class FloatIn extends LeafFilter {
private final double[] inValues;
FloatIn(String colName, List<Object> values, boolean negated) {
super(colName, negated);
inValues = new double[values.size()];
for (int i = 0; i < values.size(); i++) {
inValues[i] = (double) values.get(i);
}
Arrays.sort(inValues);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return Arrays.binarySearch(inValues, ((DoubleColumnVector) v).vector[rowIdx]) >= 0;
}
}
static class FloatLessThan extends LeafFilter {
private final double aValue;
FloatLessThan(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (double) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DoubleColumnVector) v).vector[rowIdx] < aValue;
}
}
static class FloatLessThanEquals extends LeafFilter {
private final double aValue;
FloatLessThanEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (double) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((DoubleColumnVector) v).vector[rowIdx] <= aValue;
}
}
}
| 3,283 | 29.12844 | 89 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/leaf/LeafFilterFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.filter.FilterFactory;
import org.apache.orc.impl.filter.IsNotNullFilter;
import org.apache.orc.impl.filter.IsNullFilter;
import org.apache.orc.impl.filter.LeafFilter;
import org.apache.orc.impl.filter.VectorFilter;
import java.sql.Date;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import static org.apache.orc.impl.TreeReaderFactory.isDecimalAsLong;
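/**
 * Creates the leaf VectorFilter for a single SearchArgument predicate leaf, dispatching on the
 * operator and on the literal type. For example, an EQUALS leaf of type LONG produces a
 * LongFilters.LongEquals, while IS_NULL produces an IsNullFilter (or an IsNotNullFilter when
 * negated).
 */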
public class LeafFilterFactory {
private LeafFilterFactory() {}
private static LeafFilter createEqualsFilter(String colName,
PredicateLeaf.Type type,
Object literal,
TypeDescription colType,
OrcFile.Version version,
boolean negated) {
switch (type) {
case BOOLEAN:
return new LongFilters.LongEquals(colName, (boolean) literal ? 1L : 0L, negated);
case DATE:
return new LongFilters.LongEquals(colName,
((Date) literal).toLocalDate().toEpochDay(), negated);
case DECIMAL:
HiveDecimalWritable d = (HiveDecimalWritable) literal;
assert d.scale() <= colType.getScale();
if (isDecimalAsLong(version, colType.getPrecision())) {
return new LongFilters.LongEquals(colName, d.serialize64(colType.getScale()), negated);
} else {
return new DecimalFilters.DecimalEquals(colName, d, negated);
}
case FLOAT:
return new FloatFilters.FloatEquals(colName, literal, negated);
case LONG:
return new LongFilters.LongEquals(colName, literal, negated);
case STRING:
return new StringFilters.StringEquals(colName, literal, negated);
case TIMESTAMP:
return new TimestampFilters.TimestampEquals(colName, literal, negated);
default:
throw new IllegalArgumentException(String.format("Equals does not support type: %s", type));
}
}
private static LeafFilter createLessThanFilter(String colName,
PredicateLeaf.Type type,
Object literal,
TypeDescription colType,
OrcFile.Version version,
boolean negated) {
switch (type) {
case BOOLEAN:
return new LongFilters.LongLessThan(colName, (boolean) literal ? 1L : 0L, negated);
case DATE:
return new LongFilters.LongLessThan(colName,
((Date) literal).toLocalDate().toEpochDay(), negated);
case DECIMAL:
HiveDecimalWritable d = (HiveDecimalWritable) literal;
assert d.scale() <= colType.getScale();
if (isDecimalAsLong(version, colType.getPrecision())) {
return new LongFilters.LongLessThan(colName, d.serialize64(colType.getScale()),
negated);
} else {
return new DecimalFilters.DecimalLessThan(colName, d, negated);
}
case FLOAT:
return new FloatFilters.FloatLessThan(colName, literal, negated);
case LONG:
return new LongFilters.LongLessThan(colName, literal, negated);
case STRING:
return new StringFilters.StringLessThan(colName, literal, negated);
case TIMESTAMP:
return new TimestampFilters.TimestampLessThan(colName, literal, negated);
default:
throw new IllegalArgumentException(String.format("LessThan does not support type: %s", type));
}
}
private static LeafFilter createLessThanEqualsFilter(String colName,
PredicateLeaf.Type type,
Object literal,
TypeDescription colType,
OrcFile.Version version,
boolean negated) {
switch (type) {
case BOOLEAN:
return new LongFilters.LongLessThanEquals(colName, (boolean) literal ? 1L : 0L,
negated);
case DATE:
return new LongFilters.LongLessThanEquals(colName,
((Date) literal).toLocalDate().toEpochDay(),
negated);
case DECIMAL:
HiveDecimalWritable d = (HiveDecimalWritable) literal;
assert d.scale() <= colType.getScale();
if (isDecimalAsLong(version, colType.getPrecision())) {
return new LongFilters.LongLessThanEquals(colName,
d.serialize64(colType.getScale()), negated);
} else {
return new DecimalFilters.DecimalLessThanEquals(colName, d, negated);
}
case FLOAT:
return new FloatFilters.FloatLessThanEquals(colName, literal, negated);
case LONG:
return new LongFilters.LongLessThanEquals(colName, literal, negated);
case STRING:
return new StringFilters.StringLessThanEquals(colName, literal, negated);
case TIMESTAMP:
return new TimestampFilters.TimestampLessThanEquals(colName, literal, negated);
default:
throw new IllegalArgumentException(String.format("LessThanEquals does not support type: %s", type));
}
}
private static LeafFilter createBetweenFilter(String colName,
PredicateLeaf.Type type,
Object low,
Object high,
TypeDescription colType,
OrcFile.Version version,
boolean negated) {
switch (type) {
case BOOLEAN:
return new LongFilters.LongBetween(colName, (boolean) low ? 1L : 0L,
(boolean) high ? 1L : 0L, negated);
case DATE:
return new LongFilters.LongBetween(colName, ((Date) low).toLocalDate().toEpochDay(),
((Date) high).toLocalDate().toEpochDay(), negated);
case DECIMAL:
HiveDecimalWritable dLow = (HiveDecimalWritable) low;
HiveDecimalWritable dHigh = (HiveDecimalWritable) high;
        assert dLow.scale() <= colType.getScale() && dHigh.scale() <= colType.getScale();
if (isDecimalAsLong(version, colType.getPrecision())) {
return new LongFilters.LongBetween(colName, dLow.serialize64(colType.getScale()),
dHigh.serialize64(colType.getScale()), negated);
} else {
return new DecimalFilters.DecimalBetween(colName, dLow, dHigh, negated);
}
case FLOAT:
return new FloatFilters.FloatBetween(colName, low, high, negated);
case LONG:
return new LongFilters.LongBetween(colName, low, high, negated);
case STRING:
return new StringFilters.StringBetween(colName, low, high, negated);
case TIMESTAMP:
return new TimestampFilters.TimestampBetween(colName, low, high, negated);
default:
throw new IllegalArgumentException(String.format("Between does not support type: %s", type));
}
}
private static LeafFilter createInFilter(String colName,
PredicateLeaf.Type type,
List<Object> inList,
TypeDescription colType,
OrcFile.Version version,
boolean negated) {
switch (type) {
case BOOLEAN:
return new LongFilters.LongIn(colName,
inList.stream().map((Object v) -> (boolean) v ? 1L : 0L)
.collect(Collectors.toList()), negated);
case DATE:
return new LongFilters.LongIn(colName,
inList.stream()
.map((Object v) -> ((Date) v).toLocalDate().toEpochDay())
.collect(Collectors.toList()), negated);
case DECIMAL:
if (isDecimalAsLong(version, colType.getPrecision())) {
List<Object> values = new ArrayList<>(inList.size());
for (Object o : inList) {
HiveDecimalWritable v = (HiveDecimalWritable) o;
assert v.scale() <= colType.getScale();
values.add(v.serialize64(colType.getScale()));
}
return new LongFilters.LongIn(colName, values, negated);
} else {
return new DecimalFilters.DecimalIn(colName, inList, negated);
}
case FLOAT:
return new FloatFilters.FloatIn(colName, inList, negated);
case LONG:
return new LongFilters.LongIn(colName, inList, negated);
case STRING:
return new StringFilters.StringIn(colName, inList, negated);
case TIMESTAMP:
return new TimestampFilters.TimestampIn(colName, inList, negated);
default:
throw new IllegalArgumentException(String.format("In does not support type: %s", type));
}
}
public static VectorFilter createLeafVectorFilter(PredicateLeaf leaf,
Set<String> colIds,
TypeDescription readSchema,
boolean isSchemaCaseAware,
OrcFile.Version version,
boolean negated)
throws FilterFactory.UnSupportedSArgException {
colIds.add(leaf.getColumnName());
TypeDescription colType = readSchema.findSubtype(leaf.getColumnName(), isSchemaCaseAware);
switch (leaf.getOperator()) {
case IN:
return createInFilter(leaf.getColumnName(),
leaf.getType(),
leaf.getLiteralList(),
colType,
version,
negated);
case EQUALS:
return createEqualsFilter(leaf.getColumnName(),
leaf.getType(),
leaf.getLiteral(),
colType,
version,
negated);
case LESS_THAN:
return createLessThanFilter(leaf.getColumnName(),
leaf.getType(),
leaf.getLiteral(),
colType,
version,
negated);
case LESS_THAN_EQUALS:
return createLessThanEqualsFilter(leaf.getColumnName(),
leaf.getType(),
leaf.getLiteral(),
colType,
version,
negated);
case BETWEEN:
return createBetweenFilter(leaf.getColumnName(),
leaf.getType(),
leaf.getLiteralList().get(0),
leaf.getLiteralList().get(1),
colType,
version,
negated);
case IS_NULL:
return negated ? new IsNotNullFilter(leaf.getColumnName()) :
new IsNullFilter(leaf.getColumnName());
default:
throw new FilterFactory.UnSupportedSArgException(
String.format("Predicate: %s is not supported", leaf));
}
}
}
| 12,583 | 44.103943 | 108 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/leaf/LongFilters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.orc.impl.filter.LeafFilter;
import java.util.Arrays;
import java.util.List;
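/**
 * Leaf filters for columns that are read into a LongColumnVector, e.g. boolean, date,
 * integer and decimal-as-long values.
 */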
class LongFilters {
private LongFilters() {
}
static class LongBetween extends LeafFilter {
private final long low;
private final long high;
LongBetween(String colName, Object low, Object high, boolean negated) {
super(colName, negated);
this.low = (long) low;
this.high = (long) high;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((LongColumnVector) v).vector[rowIdx] >= low &&
((LongColumnVector) v).vector[rowIdx] <= high;
}
}
static class LongEquals extends LeafFilter {
private final long aValue;
LongEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (long) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((LongColumnVector) v).vector[rowIdx] == aValue;
}
}
static class LongIn extends LeafFilter {
private final long[] inValues;
LongIn(String colName, List<Object> values, boolean negated) {
super(colName, negated);
inValues = new long[values.size()];
for (int i = 0; i < values.size(); i++) {
inValues[i] = (long) values.get(i);
}
Arrays.sort(inValues);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return Arrays.binarySearch(inValues, ((LongColumnVector) v).vector[rowIdx]) >= 0;
}
}
static class LongLessThan extends LeafFilter {
private final long aValue;
LongLessThan(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (long) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((LongColumnVector) v).vector[rowIdx] < aValue;
}
}
static class LongLessThanEquals extends LeafFilter {
private final long aValue;
LongLessThanEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (long) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((LongColumnVector) v).vector[rowIdx] <= aValue;
}
}
}
| 3,234 | 28.678899 | 87 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/leaf/StringFilters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
import org.apache.orc.impl.filter.LeafFilter;
import org.apache.orc.util.CuckooSetBytes;
import java.nio.charset.StandardCharsets;
import java.util.List;
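/**
 * Leaf filters for string columns that are read into a BytesColumnVector; literals are
 * compared as UTF-8 bytes.
 */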
class StringFilters {
private StringFilters() {
}
static class StringBetween extends LeafFilter {
private final byte[] low;
private final byte[] high;
StringBetween(String colName, Object low, Object high, boolean negated) {
super(colName, negated);
this.low = ((String) low).getBytes(StandardCharsets.UTF_8);
this.high = ((String) high).getBytes(StandardCharsets.UTF_8);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
BytesColumnVector bv = (BytesColumnVector) v;
return StringExpr.compare(bv.vector[rowIdx], bv.start[rowIdx], bv.length[rowIdx],
low, 0, low.length) >= 0 &&
StringExpr.compare(bv.vector[rowIdx], bv.start[rowIdx], bv.length[rowIdx],
high, 0, high.length) <= 0;
}
}
static class StringEquals extends LeafFilter {
private final byte[] aValue;
StringEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = ((String) aValue).getBytes(StandardCharsets.UTF_8);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
BytesColumnVector bv = (BytesColumnVector) v;
return StringExpr.equal(aValue, 0, aValue.length,
bv.vector[rowIdx], bv.start[rowIdx], bv.length[rowIdx]);
}
}
static class StringIn extends LeafFilter {
    // The set object containing the IN list, backed by a lookup structure that is optimized
    // for the column's data type (UTF-8 byte strings).
private final CuckooSetBytes inSet;
StringIn(String colName, List<Object> values, boolean negated) {
super(colName, negated);
final byte[][] inValues = new byte[values.size()][];
for (int i = 0; i < values.size(); i++) {
inValues[i] = ((String) values.get(i)).getBytes(StandardCharsets.UTF_8);
}
inSet = new CuckooSetBytes(inValues.length);
inSet.load(inValues);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
BytesColumnVector bv = (BytesColumnVector) v;
return inSet.lookup(bv.vector[rowIdx], bv.start[rowIdx], bv.length[rowIdx]);
}
}
static class StringLessThan extends LeafFilter {
private final byte[] aValue;
StringLessThan(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = ((String) aValue).getBytes(StandardCharsets.UTF_8);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
BytesColumnVector bv = (BytesColumnVector) v;
return StringExpr.compare(bv.vector[rowIdx], bv.start[rowIdx], bv.length[rowIdx],
aValue, 0, aValue.length) < 0;
}
}
static class StringLessThanEquals extends LeafFilter {
private final byte[] aValue;
StringLessThanEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = ((String) aValue).getBytes(StandardCharsets.UTF_8);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
BytesColumnVector bv = (BytesColumnVector) v;
return StringExpr.compare(bv.vector[rowIdx], bv.start[rowIdx], bv.length[rowIdx],
aValue, 0, aValue.length) <= 0;
}
}
}
| 4,497 | 35.274194 | 87 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/filter/leaf/TimestampFilters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.orc.impl.filter.LeafFilter;
import java.sql.Timestamp;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
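/**
 * Leaf filters for timestamp columns that are read into a TimestampColumnVector.
 */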
class TimestampFilters {
private TimestampFilters() {
}
static class TimestampBetween extends LeafFilter {
private final Timestamp low;
private final Timestamp high;
TimestampBetween(String colName, Object low, Object high, boolean negated) {
super(colName, negated);
this.low = (Timestamp) low;
this.high = (Timestamp) high;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((TimestampColumnVector) v).compareTo(rowIdx, low) >= 0 &&
((TimestampColumnVector) v).compareTo(rowIdx, high) <= 0;
}
}
static class TimestampEquals extends LeafFilter {
private final Timestamp aValue;
TimestampEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (Timestamp) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((TimestampColumnVector) v).compareTo(rowIdx, aValue) == 0;
}
}
static class TimestampIn extends LeafFilter {
private final Set<Timestamp> inValues;
TimestampIn(String colName, List<Object> values, boolean negated) {
super(colName, negated);
inValues = new HashSet<>(values.size());
for (Object value : values) {
inValues.add((Timestamp) value);
}
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return inValues.contains(((TimestampColumnVector) v).asScratchTimestamp(rowIdx));
}
}
static class TimestampLessThan extends LeafFilter {
private final Timestamp aValue;
TimestampLessThan(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (Timestamp) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((TimestampColumnVector) v).compareTo(rowIdx, aValue) < 0;
}
}
static class TimestampLessThanEquals extends LeafFilter {
private final Timestamp aValue;
TimestampLessThanEquals(String colName, Object aValue, boolean negated) {
super(colName, negated);
this.aValue = (Timestamp) aValue;
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((TimestampColumnVector) v).compareTo(rowIdx, aValue) <= 0;
}
}
}
| 3,424 | 29.855856 | 87 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/DecimalIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.orc.DataMask;
/**
* An identity data mask for decimal types.
*/
public class DecimalIdentity implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
DecimalColumnVector target = (DecimalColumnVector) masked;
DecimalColumnVector source = (DecimalColumnVector) original;
target.scale = source.scale;
target.precision = source.precision;
target.isRepeating = source.isRepeating;
target.noNulls = source.noNulls;
if (source.isRepeating) {
target.vector[0].set(source.vector[0]);
target.isNull[0] = source.isNull[0];
} else if (source.noNulls) {
for(int r = start; r < start + length; ++r) {
target.vector[r].set(source.vector[r]);
}
} else {
for(int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (!target.isNull[r]) {
target.vector[r].set(source.vector[r]);
}
}
}
}
}
| 1,998 | 34.696429 | 77 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/DoubleIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.orc.DataMask;
/**
* An identity data mask for floating point types.
*/
public class DoubleIdentity implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
DoubleColumnVector target = (DoubleColumnVector) masked;
DoubleColumnVector source = (DoubleColumnVector) original;
target.isRepeating = source.isRepeating;
target.noNulls = source.noNulls;
if (source.isRepeating) {
target.vector[0] = source.vector[0];
target.isNull[0] = source.isNull[0];
} else if (source.noNulls) {
for(int r = start; r < start + length; ++r) {
target.vector[r] = source.vector[r];
}
} else {
for(int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
target.vector[r] = source.vector[r];
}
}
}
}
| 1,870 | 35.686275 | 77 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/ListIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.orc.DataMask;
/**
* A data mask for list types that applies the given masks to its
* children, but doesn't mask at this level.
*/
public class ListIdentity implements DataMask {
private final DataMask child;
ListIdentity(DataMask[] child) {
this.child = child[0];
}
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start, int length) {
ListColumnVector source = (ListColumnVector) original;
ListColumnVector target = (ListColumnVector) masked;
target.noNulls = source.noNulls;
target.isRepeating = source.isRepeating;
if (source.isRepeating) {
if (!source.noNulls && source.isNull[0]) {
target.isNull[0] = true;
} else {
target.lengths[0] = source.lengths[0];
child.maskData(source.child, target.child, (int) source.offsets[0],
(int) source.lengths[0]);
}
} else if (source.noNulls) {
for(int r=start; r < start+length; ++r) {
target.offsets[r] = source.offsets[r];
target.lengths[r] = source.lengths[r];
child.maskData(source.child, target.child, (int) target.offsets[r],
(int) target.lengths[r]);
}
} else {
for(int r=start; r < start+length; ++r) {
target.isNull[r] = source.isNull[r];
if (!source.isNull[r]) {
target.offsets[r] = source.offsets[r];
target.lengths[r] = source.lengths[r];
child.maskData(source.child, target.child, (int) target.offsets[r],
(int) target.lengths[r]);
}
}
}
}
}
| 2,543 | 35.869565 | 91 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/LongIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.orc.DataMask;
/**
* An identity data mask for integer types.
*/
public class LongIdentity implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
LongColumnVector target = (LongColumnVector) masked;
LongColumnVector source = (LongColumnVector) original;
target.isRepeating = source.isRepeating;
target.noNulls = source.noNulls;
    if (source.isRepeating) {
target.vector[0] = source.vector[0];
target.isNull[0] = source.isNull[0];
} else if (source.noNulls) {
for(int r = start; r < start + length; ++r) {
target.vector[r] = source.vector[r];
}
} else {
for(int r = start; r < start + length; ++r) {
target.vector[r] = source.vector[r];
target.isNull[r] = source.isNull[r];
}
}
}
}
| 1,853 | 35.352941 | 77 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/MapIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.orc.DataMask;
/**
* A data mask for map types that applies the given masks to its
* children, but doesn't mask at this level.
*/
public class MapIdentity implements DataMask {
private final DataMask keyMask;
private final DataMask valueMask;
MapIdentity(DataMask[] children) {
this.keyMask = children[0];
this.valueMask = children[1];
}
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
MapColumnVector source = (MapColumnVector) original;
MapColumnVector target = (MapColumnVector) masked;
target.isRepeating = source.isRepeating;
target.noNulls = source.noNulls;
if (source.isRepeating) {
target.isNull[0] = source.isNull[0];
if (source.noNulls || !source.isNull[0]) {
target.lengths[0] = source.lengths[0];
keyMask.maskData(source.keys, target.keys, (int) source.offsets[0],
(int) source.lengths[0]);
        valueMask.maskData(source.values, target.values, (int) source.offsets[0],
            (int) source.lengths[0]);
      }
} else if (source.noNulls) {
for(int r=start; r < start+length; ++r) {
target.offsets[r] = source.offsets[r];
target.lengths[r] = source.lengths[r];
keyMask.maskData(source.keys, target.keys, (int) target.offsets[r],
(int) target.lengths[r]);
valueMask.maskData(source.values, target.values, (int) target.offsets[r],
(int) target.lengths[r]);
}
} else {
for(int r=start; r < start+length; ++r) {
target.isNull[r] = source.isNull[r];
if (!source.isNull[r]) {
target.offsets[r] = source.offsets[r];
target.lengths[r] = source.lengths[r];
keyMask.maskData(source.keys, target.keys, (int) target.offsets[r],
(int) target.lengths[r]);
valueMask.maskData(source.values, target.values, (int) target.offsets[r],
(int) target.lengths[r]);
}
}
}
}
}
| 2,996 | 38.434211 | 83 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/MaskFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.orc.DataMask;
import org.apache.orc.DataMaskDescription;
import org.apache.orc.TypeDescription;
import java.util.List;
/**
* A mask factory framework that automatically builds a recursive mask.
* The subclass defines how to mask the primitive types and the factory
* builds a recursive tree of data masks that matches the schema tree.
*/
public abstract class MaskFactory {
protected abstract DataMask buildBooleanMask(TypeDescription schema);
protected abstract DataMask buildLongMask(TypeDescription schema);
protected abstract DataMask buildDecimalMask(TypeDescription schema);
protected abstract DataMask buildDoubleMask(TypeDescription schema);
protected abstract DataMask buildStringMask(TypeDescription schema);
protected abstract DataMask buildDateMask(TypeDescription schema);
protected abstract DataMask buildTimestampMask(TypeDescription schema);
protected abstract DataMask buildBinaryMask(TypeDescription schema);
public DataMask build(TypeDescription schema,
DataMask.MaskOverrides overrides) {
switch(schema.getCategory()) {
case BOOLEAN:
return buildBooleanMask(schema);
case BYTE:
case SHORT:
case INT:
case LONG:
return buildLongMask(schema);
case FLOAT:
case DOUBLE:
return buildDoubleMask(schema);
case DECIMAL:
return buildDecimalMask(schema);
case STRING:
case CHAR:
case VARCHAR:
return buildStringMask(schema);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return buildTimestampMask(schema);
case DATE:
return buildDateMask(schema);
case BINARY:
return buildBinaryMask(schema);
case UNION:
return buildUnionMask(schema, overrides);
case STRUCT:
return buildStructMask(schema, overrides);
case LIST:
return buildListMask(schema, overrides);
case MAP:
return buildMapMask(schema, overrides);
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
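  // Illustrative sketch (added; not part of the original source): for a schema
  // such as struct<name:string,age:int>, build() dispatches on the category,
  // so the STRUCT branch calls buildStructMask, which uses buildChildren to
  // recurse into each field, wrapping buildStringMask(name) and
  // buildLongMask(age) in a StructIdentity. A child with an explicit override
  // in MaskOverrides is built through DataMask.Factory.build instead.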
protected DataMask[] buildChildren(List<TypeDescription> children,
DataMask.MaskOverrides overrides) {
DataMask[] result = new DataMask[children.size()];
for(int i = 0; i < result.length; ++i) {
TypeDescription child = children.get(i);
DataMaskDescription over = overrides.hasOverride(child);
if (over != null) {
result[i] = DataMask.Factory.build(over, child, overrides);
} else {
result[i] = build(child, overrides);
}
}
return result;
}
protected DataMask buildStructMask(TypeDescription schema,
DataMask.MaskOverrides overrides) {
return new StructIdentity(buildChildren(schema.getChildren(), overrides));
}
DataMask buildListMask(TypeDescription schema,
DataMask.MaskOverrides overrides) {
return new ListIdentity(buildChildren(schema.getChildren(), overrides));
}
DataMask buildMapMask(TypeDescription schema,
DataMask.MaskOverrides overrides) {
return new MapIdentity(buildChildren(schema.getChildren(), overrides));
}
DataMask buildUnionMask(TypeDescription schema,
DataMask.MaskOverrides overrides) {
return new UnionIdentity(buildChildren(schema.getChildren(), overrides));
}
}
| 4,268 | 35.801724 | 78 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/MaskProvider.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.orc.DataMask;
import org.apache.orc.DataMaskDescription;
import org.apache.orc.TypeDescription;
/**
* The Provider for all of the built-in data masks.
*/
public class MaskProvider implements DataMask.Provider {
@Override
public DataMask build(DataMaskDescription description,
TypeDescription schema,
DataMask.MaskOverrides overrides) {
String name = description.getName();
if (name.equals(DataMask.Standard.NULLIFY.getName())) {
return new NullifyMask();
} else if (name.equals(DataMask.Standard.REDACT.getName())) {
return new RedactMaskFactory(description.getParameters())
.build(schema, overrides);
} else if(name.equals(DataMask.Standard.SHA256.getName())) {
return new SHA256MaskFactory().build(schema, overrides);
}
return null;
}
}
| 1,715 | 37.133333 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/NullifyMask.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.DataMask;
/**
* Masking routine that converts every value to NULL.
*/
public class NullifyMask implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked,
int start, int length) {
masked.noNulls = false;
masked.isRepeating = true;
masked.isNull[0] = true;
}
}
| 1,264 | 34.138889 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/RedactMaskFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.DataMask;
import org.apache.orc.TypeDescription;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Map;
import java.util.SortedMap;
import java.util.TimeZone;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
/**
* Masking strategy that hides most string and numeric values based on unicode
* character categories.
* <p>
* Masking Parameters:
* character replacements: string of 10 characters one per group below
* letter, upper case (default X)
* letter, lower case (default x)
* number, digit (default 9)
* symbol (default $)
* punctuation (default .)
* separator (default no masking)
* letter, other (default ª)
* mark (default ः)
* number, other (default ²)
 *     other (default ۝ U+06DD)
* <p>
* time replacements: string of 6 numbers or _ one per field below
* year (0 to 4000, default no masking)
* month (1 to 12, default 1)
* date (1 to 31, default 1)
* hour (0 to 23, default 0)
* minute (0 to 59, default 0)
* second (0 to 59, default 0)
* <p>
* Parameters use "_" for preserve original.
*/
public class RedactMaskFactory extends MaskFactory {
/**
* The value to indicate that the value should be preserved.
*/
private static final int UNMASKED_CHAR = "_".codePointAt(0);
private static final int UNMASKED_DATE = -1;
// The default replacements for each character category.
// I picked a character in the same category so that the masking is
// idempotent. For non-ascii characters, I mostly picked the first example.
private static final int DEFAULT_LETTER_UPPER = "X".codePointAt(0);
private static final int DEFAULT_LETTER_LOWER = "x".codePointAt(0);
private static final int DEFAULT_NUMBER_DIGIT = 9;
private static final int DEFAULT_NUMBER_DIGIT_CP =
Integer.toString(DEFAULT_NUMBER_DIGIT).codePointAt(0);
private static final int DEFAULT_SYMBOL = "$".codePointAt(0);
private static final int DEFAULT_PUNCTUATION = ".".codePointAt(0);
private static final int DEFAULT_SEPARATOR = UNMASKED_CHAR;
private static final int DEFAULT_LETTER_OTHER = "\u00AA".codePointAt(0);
private static final int DEFAULT_MARK = "\u0903".codePointAt(0);
private static final int DEFAULT_NUMBER_OTHER = "\u00B2".codePointAt(0);
private static final int DEFAULT_OTHER = "\u06DD".codePointAt(0);
// The replacement codepoint for each character category. We use codepoints
// here so that we don't have to worry about handling long UTF characters
// as special cases.
private final int UPPER_REPLACEMENT;
private final int LOWER_REPLACEMENT;
private final int OTHER_LETTER_REPLACEMENT;
private final int MARK_REPLACEMENT;
private final int DIGIT_CP_REPLACEMENT;
private final int OTHER_NUMBER_REPLACEMENT;
private final int SYMBOL_REPLACEMENT;
private final int PUNCTUATION_REPLACEMENT;
private final int SEPARATOR_REPLACEMENT;
private final int OTHER_REPLACEMENT;
// numeric replacement
private final int DIGIT_REPLACEMENT;
// time replacement
private final int YEAR_REPLACEMENT;
private final int MONTH_REPLACEMENT;
private final int DATE_REPLACEMENT;
private final int HOUR_REPLACEMENT;
private final int MINUTE_REPLACEMENT;
private final int SECOND_REPLACEMENT;
private final boolean maskDate;
private final boolean maskTimestamp;
// index tuples that are not to be masked
private final SortedMap<Integer,Integer> unmaskIndexRanges = new TreeMap<>();
public RedactMaskFactory(String... params) {
ByteBuffer param = params.length < 1 ? ByteBuffer.allocate(0) :
ByteBuffer.wrap(params[0].getBytes(StandardCharsets.UTF_8));
UPPER_REPLACEMENT = getNextCodepoint(param, DEFAULT_LETTER_UPPER);
LOWER_REPLACEMENT = getNextCodepoint(param, DEFAULT_LETTER_LOWER);
DIGIT_CP_REPLACEMENT = getNextCodepoint(param, DEFAULT_NUMBER_DIGIT_CP);
DIGIT_REPLACEMENT = getReplacementDigit(DIGIT_CP_REPLACEMENT);
SYMBOL_REPLACEMENT = getNextCodepoint(param, DEFAULT_SYMBOL);
PUNCTUATION_REPLACEMENT = getNextCodepoint(param, DEFAULT_PUNCTUATION);
SEPARATOR_REPLACEMENT = getNextCodepoint(param, DEFAULT_SEPARATOR);
OTHER_LETTER_REPLACEMENT = getNextCodepoint(param, DEFAULT_LETTER_OTHER);
MARK_REPLACEMENT = getNextCodepoint(param, DEFAULT_MARK);
OTHER_NUMBER_REPLACEMENT = getNextCodepoint(param, DEFAULT_NUMBER_OTHER);
OTHER_REPLACEMENT = getNextCodepoint(param, DEFAULT_OTHER);
String[] timeParams;
if (params.length < 2 || StringUtils.isBlank(params[1])) {
timeParams = null;
} else {
timeParams = params[1].split("\\W+");
}
YEAR_REPLACEMENT = getDateParam(timeParams, 0, UNMASKED_DATE, 4000);
MONTH_REPLACEMENT = getDateParam(timeParams, 1, 1, 12);
DATE_REPLACEMENT = getDateParam(timeParams, 2, 1, 31);
HOUR_REPLACEMENT = getDateParam(timeParams, 3, 0, 23);
MINUTE_REPLACEMENT = getDateParam(timeParams, 4, 0, 59);
SECOND_REPLACEMENT = getDateParam(timeParams, 5, 0, 59);
maskDate = (YEAR_REPLACEMENT != UNMASKED_DATE) ||
(MONTH_REPLACEMENT != UNMASKED_DATE) ||
(DATE_REPLACEMENT != UNMASKED_DATE);
maskTimestamp = maskDate || (HOUR_REPLACEMENT != UNMASKED_DATE) ||
(MINUTE_REPLACEMENT != UNMASKED_DATE) ||
(SECOND_REPLACEMENT != UNMASKED_DATE);
/* un-mask range */
if(!(params.length < 3 || StringUtils.isBlank(params[2]))) {
String[] unmaskIndexes = params[2].split(",");
for(int i=0; i < unmaskIndexes.length; i++ ) {
String[] pair = unmaskIndexes[i].trim().split(":");
unmaskIndexRanges.put(Integer.parseInt(pair[0]), Integer.parseInt(pair[1]));
}
}
}
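  // Hedged example (added for illustration; not in the original file): the
  // constructor arguments map onto the javadoc above. Assuming
  //   new RedactMaskFactory("Xx9", "2000 _ _ _ _ _", "0:1")
  // letters become X/x and digits become 9 (the remaining categories keep
  // their defaults once the first string runs out of characters), every year
  // is rewritten to 2000 while month/day/time fields pass through ("_"), and
  // the first two character positions of each value stay unmasked.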
@Override
protected DataMask buildBooleanMask(TypeDescription schema) {
if (DIGIT_CP_REPLACEMENT == UNMASKED_CHAR) {
return new LongIdentity();
} else {
return new BooleanRedactConverter();
}
}
@Override
protected DataMask buildLongMask(TypeDescription schema) {
if (DIGIT_CP_REPLACEMENT == UNMASKED_CHAR) {
return new LongIdentity();
} else {
return new LongRedactConverter(schema.getCategory());
}
}
@Override
protected DataMask buildDecimalMask(TypeDescription schema) {
if (DIGIT_CP_REPLACEMENT == UNMASKED_CHAR) {
return new DecimalIdentity();
} else {
return new DecimalRedactConverter();
}
}
@Override
protected DataMask buildDoubleMask(TypeDescription schema) {
if (DIGIT_CP_REPLACEMENT == UNMASKED_CHAR) {
return new DoubleIdentity();
} else {
return new DoubleRedactConverter();
}
}
@Override
protected DataMask buildStringMask(TypeDescription schema) {
return new StringConverter();
}
@Override
protected DataMask buildDateMask(TypeDescription schema) {
if (maskDate) {
return new DateRedactConverter();
} else {
return new LongIdentity();
}
}
@Override
protected DataMask buildTimestampMask(TypeDescription schema) {
if (maskTimestamp) {
return new TimestampRedactConverter();
} else {
return new TimestampIdentity();
}
}
@Override
protected DataMask buildBinaryMask(TypeDescription schema) {
return new NullifyMask();
}
class LongRedactConverter implements DataMask {
final long mask;
LongRedactConverter(TypeDescription.Category category) {
switch (category) {
case BYTE:
mask = 0xff;
break;
case SHORT:
mask = 0xffff;
break;
case INT:
mask = 0xffff_ffff;
break;
default:
case LONG:
mask = -1;
break;
}
}
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
LongColumnVector target = (LongColumnVector) masked;
LongColumnVector source = (LongColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.vector[0] = maskLong(source.vector[0]) & mask;
target.isNull[0] = source.isNull[0];
} else {
for(int r = start; r < start + length; ++r) {
target.vector[r] = maskLong(source.vector[r]) & mask;
target.isNull[r] = source.isNull[r];
}
}
}
}
class BooleanRedactConverter implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
LongColumnVector target = (LongColumnVector) masked;
LongColumnVector source = (LongColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.vector[0] = DIGIT_REPLACEMENT == 0 ? 0 : 1;
target.isNull[0] = source.isNull[0];
} else {
for(int r = start; r < start + length; ++r) {
target.vector[r] = DIGIT_REPLACEMENT == 0 ? 0 : 1;
target.isNull[r] = source.isNull[r];
}
}
}
}
class DoubleRedactConverter implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
DoubleColumnVector target = (DoubleColumnVector) masked;
DoubleColumnVector source = (DoubleColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.vector[0] = maskDouble(source.vector[0]);
target.isNull[0] = source.isNull[0];
} else {
for(int r = start; r < start + length; ++r) {
target.vector[r] = maskDouble(source.vector[r]);
target.isNull[r] = source.isNull[r];
}
}
}
}
class StringConverter implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
BytesColumnVector target = (BytesColumnVector) masked;
BytesColumnVector source = (BytesColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.isNull[0] = source.isNull[0];
if (target.noNulls || !target.isNull[0]) {
maskString(source, 0, target);
}
} else {
for(int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (target.noNulls || !target.isNull[r]) {
maskString(source, r, target);
}
}
}
}
}
class DecimalRedactConverter implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
DecimalColumnVector target = (DecimalColumnVector) masked;
DecimalColumnVector source = (DecimalColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
target.scale = source.scale;
target.precision = source.precision;
if (original.isRepeating) {
target.isNull[0] = source.isNull[0];
if (target.noNulls || !target.isNull[0]) {
target.vector[0].set(maskDecimal(source.vector[0]));
}
} else {
for(int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (target.noNulls || !target.isNull[r]) {
target.vector[r].set(maskDecimal(source.vector[r]));
}
}
}
}
}
class TimestampRedactConverter implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
TimestampColumnVector target = (TimestampColumnVector) masked;
TimestampColumnVector source = (TimestampColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.isNull[0] = source.isNull[0];
if (target.noNulls || !target.isNull[0]) {
target.time[0] = maskTime(source.time[0]);
target.nanos[0] = 0;
}
} else {
for(int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (target.noNulls || !target.isNull[r]) {
target.time[r] = maskTime(source.time[r]);
target.nanos[r] = 0;
}
}
}
}
}
class DateRedactConverter implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
LongColumnVector target = (LongColumnVector) masked;
LongColumnVector source = (LongColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.isNull[0] = source.isNull[0];
if (target.noNulls || !target.isNull[0]) {
target.vector[0] = maskDate((int) source.vector[0]);
}
} else {
for(int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (target.noNulls || !target.isNull[r]) {
target.vector[r] = maskDate((int) source.vector[r]);
}
}
}
}
}
/**
* Get the next code point from the ByteBuffer. Moves the position in the
* ByteBuffer forward to the next code point.
* @param param the source of bytes
* @param defaultValue if there are no bytes left, use this value
* @return the code point that was found at the front of the buffer.
*/
static int getNextCodepoint(ByteBuffer param, int defaultValue) {
if (param.remaining() == 0) {
return defaultValue;
} else {
return Text.bytesToCodePoint(param);
}
}
/**
* Get the replacement digit. This routine supports non-ASCII values for the
* replacement. For example, if the user gives one of "7", "७", "〧" or "፯"
* the value is 7.
* @param digitCodePoint the code point that is replacing digits
* @return the number from 0 to 9 to use as the numeric replacement
*/
static int getReplacementDigit(int digitCodePoint) {
int dig = Character.getNumericValue(digitCodePoint);
if (dig >= 0 && dig <= 9) {
return dig;
} else {
return DEFAULT_NUMBER_DIGIT;
}
}
static int getDateParam(String[] dateParams, int posn,
int myDefault, int max) {
if (dateParams != null && posn < dateParams.length) {
if (dateParams[posn].codePointAt(0) == UNMASKED_CHAR) {
return UNMASKED_DATE;
} else {
int result = Integer.parseInt(dateParams[posn]);
if (result >= -1 && result <= max) {
return result;
} else {
throw new IllegalArgumentException("Invalid date parameter " + posn +
" of " + dateParams[posn] + " greater than " + max);
}
}
} else {
return myDefault;
}
}
/**
* Replace each digit in value with DIGIT_REPLACEMENT scaled to the matching
* number of digits.
* @param value the number to mask
* @return the masked value
*/
public long maskLong(long value) {
/* check whether unmasking range provided */
if (!unmaskIndexRanges.isEmpty()) {
return maskLongWithUnmasking(value);
}
long base;
if (DIGIT_REPLACEMENT == 0) {
return 0;
} else if (value >= 0) {
base = 1;
} else {
base = -1;
// make sure Long.MIN_VALUE doesn't overflow
if (value == Long.MIN_VALUE) {
value = Long.MAX_VALUE;
} else {
value = -value;
}
}
if (value < 100_000_000L) {
if (value < 10_000L) {
if (value < 100L) {
if (value < 10L) {
base *= 1;
} else {
base *= 11;
}
} else if (value < 1_000L) {
base *= 111;
} else {
base *= 1_111;
}
} else if (value < 1_000_000L) {
if (value < 100_000L) {
base *= 11_111;
} else {
base *= 111_111;
}
} else if (value < 10_000_000L) {
base *= 1_111_111;
} else {
base *= 11_111_111;
}
} else if (value < 10_000_000_000_000_000L) {
if (value < 1_000_000_000_000L) {
if (value < 10_000_000_000L) {
if (value < 1_000_000_000L) {
base *= 111_111_111;
} else {
base *= 1_111_111_111;
}
} else if (value < 100_000_000_000L) {
base *= 11_111_111_111L;
} else {
base *= 111_111_111_111L;
}
} else if (value < 100_000_000_000_000L) {
if (value < 10_000_000_000_000L) {
base *= 1_111_111_111_111L;
} else {
base *= 11_111_111_111_111L;
}
} else if (value < 1_000_000_000_000_000L) {
base *= 111_111_111_111_111L;
} else {
base *= 1_111_111_111_111_111L;
}
} else if (value < 100_000_000_000_000_000L) {
base *= 11_111_111_111_111_111L;
// If the digit is 9, it would overflow at 19 digits, so use 18.
} else if (value < 1_000_000_000_000_000_000L || DIGIT_REPLACEMENT == 9) {
base *= 111_111_111_111_111_111L;
} else {
base *= 1_111_111_111_111_111_111L;
}
return DIGIT_REPLACEMENT * base;
}
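  // Worked example (added for illustration), assuming the default digit
  // replacement of 9 and no unmask ranges:
  //   maskLong(0)     -> one digit    -> 9
  //   maskLong(12345) -> five digits  -> 99999
  //   maskLong(-207)  -> sign kept    -> -999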
private static final double[] DOUBLE_POWER_10 = new double[]{
1e-308, 1e-307, 1e-306, 1e-305, 1e-304, 1e-303, 1e-302, 1e-301, 1e-300,
1e-299, 1e-298, 1e-297, 1e-296, 1e-295, 1e-294, 1e-293, 1e-292, 1e-291,
1e-290, 1e-289, 1e-288, 1e-287, 1e-286, 1e-285, 1e-284, 1e-283, 1e-282,
1e-281, 1e-280, 1e-279, 1e-278, 1e-277, 1e-276, 1e-275, 1e-274, 1e-273,
1e-272, 1e-271, 1e-270, 1e-269, 1e-268, 1e-267, 1e-266, 1e-265, 1e-264,
1e-263, 1e-262, 1e-261, 1e-260, 1e-259, 1e-258, 1e-257, 1e-256, 1e-255,
1e-254, 1e-253, 1e-252, 1e-251, 1e-250, 1e-249, 1e-248, 1e-247, 1e-246,
1e-245, 1e-244, 1e-243, 1e-242, 1e-241, 1e-240, 1e-239, 1e-238, 1e-237,
1e-236, 1e-235, 1e-234, 1e-233, 1e-232, 1e-231, 1e-230, 1e-229, 1e-228,
1e-227, 1e-226, 1e-225, 1e-224, 1e-223, 1e-222, 1e-221, 1e-220, 1e-219,
1e-218, 1e-217, 1e-216, 1e-215, 1e-214, 1e-213, 1e-212, 1e-211, 1e-210,
1e-209, 1e-208, 1e-207, 1e-206, 1e-205, 1e-204, 1e-203, 1e-202, 1e-201,
1e-200, 1e-199, 1e-198, 1e-197, 1e-196, 1e-195, 1e-194, 1e-193, 1e-192,
1e-191, 1e-190, 1e-189, 1e-188, 1e-187, 1e-186, 1e-185, 1e-184, 1e-183,
1e-182, 1e-181, 1e-180, 1e-179, 1e-178, 1e-177, 1e-176, 1e-175, 1e-174,
1e-173, 1e-172, 1e-171, 1e-170, 1e-169, 1e-168, 1e-167, 1e-166, 1e-165,
1e-164, 1e-163, 1e-162, 1e-161, 1e-160, 1e-159, 1e-158, 1e-157, 1e-156,
1e-155, 1e-154, 1e-153, 1e-152, 1e-151, 1e-150, 1e-149, 1e-148, 1e-147,
1e-146, 1e-145, 1e-144, 1e-143, 1e-142, 1e-141, 1e-140, 1e-139, 1e-138,
1e-137, 1e-136, 1e-135, 1e-134, 1e-133, 1e-132, 1e-131, 1e-130, 1e-129,
1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120,
1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111,
1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, 1e-102,
1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96, 1e-95, 1e-94, 1e-93,
1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86, 1e-85, 1e-84,
1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76, 1e-75,
1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66,
1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57,
1e-56, 1e-55, 1e-54, 1e-53, 1e-52, 1e-51, 1e-50, 1e-49, 1e-48,
1e-47, 1e-46, 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39,
1e-38, 1e-37, 1e-36, 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30,
1e-29, 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21,
1e-20, 1e-19, 1e-18, 1e-17, 1e-16, 1e-15, 1e-14, 1e-13, 1e-12,
1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3,
1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6,
1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15,
1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 1e23, 1e24,
1e25, 1e26, 1e27, 1e28, 1e29, 1e30, 1e31, 1e32, 1e33,
1e34, 1e35, 1e36, 1e37, 1e38, 1e39, 1e40, 1e41, 1e42,
1e43, 1e44, 1e45, 1e46, 1e47, 1e48, 1e49, 1e50, 1e51,
1e52, 1e53, 1e54, 1e55, 1e56, 1e57, 1e58, 1e59, 1e60,
1e61, 1e62, 1e63, 1e64, 1e65, 1e66, 1e67, 1e68, 1e69,
1e70, 1e71, 1e72, 1e73, 1e74, 1e75, 1e76, 1e77, 1e78,
1e79, 1e80, 1e81, 1e82, 1e83, 1e84, 1e85, 1e86, 1e87,
1e88, 1e89, 1e90, 1e91, 1e92, 1e93, 1e94, 1e95, 1e96,
1e97, 1e98, 1e99, 1e100, 1e101, 1e102, 1e103, 1e104, 1e105,
1e106, 1e107, 1e108, 1e109, 1e110, 1e111, 1e112, 1e113, 1e114,
1e115, 1e116, 1e117, 1e118, 1e119, 1e120, 1e121, 1e122, 1e123,
1e124, 1e125, 1e126, 1e127, 1e128, 1e129, 1e130, 1e131, 1e132,
1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, 1e140, 1e141,
1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, 1e150,
1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159,
1e160, 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168,
1e169, 1e170, 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177,
1e178, 1e179, 1e180, 1e181, 1e182, 1e183, 1e184, 1e185, 1e186,
1e187, 1e188, 1e189, 1e190, 1e191, 1e192, 1e193, 1e194, 1e195,
1e196, 1e197, 1e198, 1e199, 1e200, 1e201, 1e202, 1e203, 1e204,
1e205, 1e206, 1e207, 1e208, 1e209, 1e210, 1e211, 1e212, 1e213,
1e214, 1e215, 1e216, 1e217, 1e218, 1e219, 1e220, 1e221, 1e222,
1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, 1e230, 1e231,
1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, 1e240,
1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249,
1e250, 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258,
1e259, 1e260, 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267,
1e268, 1e269, 1e270, 1e271, 1e272, 1e273, 1e274, 1e275, 1e276,
1e277, 1e278, 1e279, 1e280, 1e281, 1e282, 1e283, 1e284, 1e285,
1e286, 1e287, 1e288, 1e289, 1e290, 1e291, 1e292, 1e293, 1e294,
1e295, 1e296, 1e297, 1e298, 1e299, 1e300, 1e301, 1e302, 1e303,
1e304, 1e305, 1e306, 1e307};
/**
   * Replace each digit in value with the replacement digit, preserving the
   * order of magnitude of the number.
   * @param value the number to mask
   * @return the masked value
*/
public double maskDouble(double value) {
/* check whether unmasking range provided */
if (!unmaskIndexRanges.isEmpty()) {
      return maskDoubleWithUnmasking(value);
}
double base;
// It seems better to mask 0 to 9.99999 rather than 9.99999e-308.
if (value == 0 || DIGIT_REPLACEMENT == 0) {
return DIGIT_REPLACEMENT * 1.11111;
} else if (value > 0) {
base = 1.11111;
} else {
base = -1.11111;
value = -value;
}
int posn = Arrays.binarySearch(DOUBLE_POWER_10, value);
if (posn < -DOUBLE_POWER_10.length - 2) {
posn = DOUBLE_POWER_10.length - 1;
} else if (posn == -1) {
posn = 0;
} else if (posn < 0) {
posn = -posn -2;
}
return DIGIT_REPLACEMENT * base * DOUBLE_POWER_10[posn];
}
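  // Worked example (added for illustration), default digit replacement 9 and
  // no unmask ranges; only the order of magnitude survives:
  //   maskDouble(12345.678) -> roughly 9.99999e4  (99999.9)
  //   maskDouble(-0.00123)  -> roughly -9.99999e-3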
private final Calendar scratch = Calendar.getInstance();
/**
* Given the requested masking parameters, redact the given time
* @param millis the original time
* @return the millis after it has been masked
*/
long maskTime(long millis) {
scratch.setTimeInMillis(millis);
if (YEAR_REPLACEMENT != UNMASKED_DATE) {
scratch.set(Calendar.YEAR, YEAR_REPLACEMENT);
}
if (MONTH_REPLACEMENT != UNMASKED_DATE) {
scratch.set(Calendar.MONTH, MONTH_REPLACEMENT - 1);
}
if (DATE_REPLACEMENT != UNMASKED_DATE) {
scratch.set(Calendar.DATE, DATE_REPLACEMENT);
}
if (HOUR_REPLACEMENT != UNMASKED_DATE) {
if (HOUR_REPLACEMENT >= 12) {
scratch.set(Calendar.HOUR, HOUR_REPLACEMENT - 12);
scratch.set(Calendar.AM_PM, Calendar.PM);
} else {
scratch.set(Calendar.HOUR, HOUR_REPLACEMENT);
scratch.set(Calendar.AM_PM, Calendar.AM);
}
}
if (MINUTE_REPLACEMENT != UNMASKED_DATE) {
scratch.set(Calendar.MINUTE, MINUTE_REPLACEMENT);
}
if (SECOND_REPLACEMENT != UNMASKED_DATE) {
scratch.set(Calendar.SECOND, SECOND_REPLACEMENT);
scratch.set(Calendar.MILLISECOND, 0);
}
return scratch.getTimeInMillis();
}
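  // Illustration (added; not in the original source): with the default time
  // parameters (year kept, month=1, day=1, hour=minute=second=0), a local
  // timestamp of 2021-07-15 13:45:30.250 becomes 2021-01-01 00:00:00.000 in
  // the reader's default time zone.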
private static final long MILLIS_PER_DAY = TimeUnit.DAYS.toMillis(1);
private final Calendar utcScratch =
Calendar.getInstance(TimeZone.getTimeZone("UTC"));
/**
* Given a date as the number of days since epoch (1 Jan 1970),
* mask the date given the parameters.
* @param daysSinceEpoch the number of days after epoch
* @return the number of days after epoch when masked
*/
int maskDate(int daysSinceEpoch) {
utcScratch.setTimeInMillis(daysSinceEpoch * MILLIS_PER_DAY);
if (YEAR_REPLACEMENT != UNMASKED_DATE) {
utcScratch.set(Calendar.YEAR, YEAR_REPLACEMENT);
}
if (MONTH_REPLACEMENT != UNMASKED_DATE) {
utcScratch.set(Calendar.MONTH, MONTH_REPLACEMENT - 1);
}
if (DATE_REPLACEMENT != UNMASKED_DATE) {
utcScratch.set(Calendar.DATE, DATE_REPLACEMENT);
}
return (int) (utcScratch.getTimeInMillis() / MILLIS_PER_DAY);
}
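  // Illustration (added; not in the original source): with the default
  // replacements, 2021-07-15 (day 18823 since epoch) masks to 2021-01-01
  // (day 18628); only the year survives unless the parameters say otherwise.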
/**
* Mask a decimal.
* This is painfully slow because it converts to a string and then back to
* a decimal. Until HiveDecimalWritable gives us more access, this is
* the best tradeoff between developer time, functionality, and run time.
* @param source the value to mask
* @return the masked value.
*/
HiveDecimalWritable maskDecimal(HiveDecimalWritable source) {
return new HiveDecimalWritable(maskNumericString(source.toString()));
}
/**
* Given a UTF code point, find the replacement codepoint
* @param codepoint a UTF character
* @return the replacement codepoint
*/
int getReplacement(int codepoint) {
switch (Character.getType(codepoint)) {
case Character.UPPERCASE_LETTER:
return UPPER_REPLACEMENT;
case Character.LOWERCASE_LETTER:
return LOWER_REPLACEMENT;
case Character.TITLECASE_LETTER:
case Character.MODIFIER_LETTER:
case Character.OTHER_LETTER:
return OTHER_LETTER_REPLACEMENT;
case Character.NON_SPACING_MARK:
case Character.ENCLOSING_MARK:
case Character.COMBINING_SPACING_MARK:
return MARK_REPLACEMENT;
case Character.DECIMAL_DIGIT_NUMBER:
return DIGIT_CP_REPLACEMENT;
case Character.LETTER_NUMBER:
case Character.OTHER_NUMBER:
return OTHER_NUMBER_REPLACEMENT;
case Character.SPACE_SEPARATOR:
case Character.LINE_SEPARATOR:
case Character.PARAGRAPH_SEPARATOR:
return SEPARATOR_REPLACEMENT;
case Character.MATH_SYMBOL:
case Character.CURRENCY_SYMBOL:
case Character.MODIFIER_SYMBOL:
case Character.OTHER_SYMBOL:
return SYMBOL_REPLACEMENT;
case Character.DASH_PUNCTUATION:
case Character.START_PUNCTUATION:
case Character.END_PUNCTUATION:
case Character.CONNECTOR_PUNCTUATION:
case Character.OTHER_PUNCTUATION:
return PUNCTUATION_REPLACEMENT;
default:
return OTHER_REPLACEMENT;
}
}
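  // Example mappings with the default replacements (added for illustration):
  //   'G' -> 'X' (uppercase letter)   'é' -> 'x' (lowercase letter)
  //   '7' -> '9' (decimal digit)      '€' -> '$' (currency symbol)
  //   ',' -> '.' (punctuation)        ' ' -> unchanged (separator default "_")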
/**
* Get the number of bytes for each codepoint
* @param codepoint the codepoint to check
* @return the number of bytes
*/
static int getCodepointLength(int codepoint) {
if (codepoint < 0) {
throw new IllegalArgumentException("Illegal codepoint " + codepoint);
} else if (codepoint < 0x80) {
return 1;
} else if (codepoint < 0x7ff) {
return 2;
} else if (codepoint < 0xffff) {
return 3;
} else if (codepoint < 0x10FFFF) {
return 4;
} else {
throw new IllegalArgumentException("Illegal codepoint " + codepoint);
}
}
/**
* Write the give codepoint to the buffer.
* @param codepoint the codepoint to write
* @param buffer the buffer to write into
* @param offset the first offset to use
* @param length the number of bytes that will be used
*/
static void writeCodepoint(int codepoint, byte[] buffer, int offset,
int length) {
switch (length) {
case 1:
buffer[offset] = (byte) codepoint;
break;
case 2:
buffer[offset] = (byte)(0xC0 | codepoint >> 6);
buffer[offset+1] = (byte)(0x80 | (codepoint & 0x3f));
break;
case 3:
buffer[offset] = (byte)(0xE0 | codepoint >> 12);
buffer[offset+1] = (byte)(0x80 | ((codepoint >> 6) & 0x3f));
buffer[offset+2] = (byte)(0x80 | (codepoint & 0x3f));
break;
case 4:
buffer[offset] = (byte)(0xF0 | codepoint >> 18);
buffer[offset+1] = (byte)(0x80 | ((codepoint >> 12) & 0x3f));
buffer[offset+2] = (byte)(0x80 | ((codepoint >> 6) & 0x3f));
buffer[offset+3] = (byte)(0x80 | (codepoint & 0x3f));
break;
default:
throw new IllegalArgumentException("Invalid length for codepoint " +
codepoint + " = " + length);
}
}
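  // UTF-8 sketch (added for illustration): getCodepointLength/writeCodepoint
  // follow the standard UTF-8 layout, e.g.
  //   U+0041 'A' -> 1 byte  0x41
  //   U+00AA 'ª' -> 2 bytes 0xC2 0xAA
  //   U+0903     -> 3 bytes 0xE0 0xA4 0x83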
/**
* Mask a string by finding the character category of each character
* and replacing it with the matching literal.
* @param source the source column vector
* @param row the value index
* @param target the target column vector
*/
void maskString(BytesColumnVector source, int row, BytesColumnVector target) {
int expectedBytes = source.length[row];
ByteBuffer sourceBytes = ByteBuffer.wrap(source.vector[row],
source.start[row], source.length[row]);
// ensure we have enough space, if the masked data is the same size
target.ensureValPreallocated(expectedBytes);
byte[] outputBuffer = target.getValPreallocatedBytes();
int outputOffset = target.getValPreallocatedStart();
int outputStart = outputOffset;
int index = 0;
while (sourceBytes.remaining() > 0) {
int cp = Text.bytesToCodePoint(sourceBytes);
// Find the replacement for the current character.
int replacement = getReplacement(cp);
if (replacement == UNMASKED_CHAR || isIndexInUnmaskRange(index, source.length[row])) {
replacement = cp;
}
// increment index
index++;
int len = getCodepointLength(replacement);
// If the translation will overflow the buffer, we need to resize.
// This will only happen when the masked size is larger than the original.
if (len + outputOffset > outputBuffer.length) {
        // Revise the estimate of how much we are going to need now. We are
        // maximally pessimistic here so that we don't have to expand again
        // for this value.
int currentOutputStart = outputStart;
int currentOutputLength = outputOffset - currentOutputStart;
expectedBytes = currentOutputLength + len + sourceBytes.remaining() * 4;
// Expand the buffer to fit the new estimate
target.ensureValPreallocated(expectedBytes);
// Copy over the bytes we've already written for this value and move
// the pointers to the new output buffer.
byte[] oldBuffer = outputBuffer;
outputBuffer = target.getValPreallocatedBytes();
outputOffset = target.getValPreallocatedStart();
outputStart = outputOffset;
System.arraycopy(oldBuffer, currentOutputStart, outputBuffer,
outputOffset, currentOutputLength);
outputOffset += currentOutputLength;
}
// finally copy the bytes
writeCodepoint(replacement, outputBuffer, outputOffset, len);
outputOffset += len;
}
target.setValPreallocated(row, outputOffset - outputStart);
}
static final long OVERFLOW_REPLACEMENT = 111_111_111_111_111_111L;
/**
* A function that masks longs when there are unmasked ranges.
* @param value the original value
* @return the masked value
*/
long maskLongWithUnmasking(long value) throws IndexOutOfBoundsException {
try {
return Long.parseLong(maskNumericString(Long.toString(value)));
} catch (NumberFormatException nfe) {
return OVERFLOW_REPLACEMENT * DIGIT_REPLACEMENT;
}
}
/**
* A function that masks doubles when there are unmasked ranges.
* @param value original value
* @return masked value
*/
  double maskDoubleWithUnmasking(final double value) {
try {
return Double.parseDouble(maskNumericString(Double.toString(value)));
} catch (NumberFormatException nfe) {
return OVERFLOW_REPLACEMENT * DIGIT_REPLACEMENT;
}
}
/**
* Mask the given stringified numeric value excluding the unmask range.
* Non-digit characters are passed through on the assumption they are
   * markers (e.g. one of ",.ef").
   * @param value the original value.
   * @return the masked value
*/
String maskNumericString(final String value) {
StringBuilder result = new StringBuilder();
final int length = value.codePointCount(0, value.length());
for(int c=0; c < length; ++c) {
int cp = value.codePointAt(c);
if (isIndexInUnmaskRange(c, length) ||
Character.getType(cp) != Character.DECIMAL_DIGIT_NUMBER) {
result.appendCodePoint(cp);
} else {
result.appendCodePoint(DIGIT_CP_REPLACEMENT);
}
}
return result.toString();
}
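  // Worked example (added for illustration), default digit replacement 9 and
  // an unmask range parameter of "0:1,-2:-1":
  //   maskNumericString("1234-5678") -> "1299-9978"
  // the first two and last two character positions are preserved and the
  // non-digit '-' always passes through.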
/**
* Given an index and length of a string
* find out whether it is in a given un-mask range.
* @param index the character point index
* @param length the length of the string in character points
* @return true if the index is in un-mask range else false.
*/
private boolean isIndexInUnmaskRange(final int index, final int length) {
for(final Map.Entry<Integer, Integer> pair : unmaskIndexRanges.entrySet()) {
int start;
int end;
if(pair.getKey() >= 0) {
// for positive indexes
start = pair.getKey();
} else {
// for negative indexes
start = length + pair.getKey();
}
if(pair.getValue() >= 0) {
// for positive indexes
end = pair.getValue();
} else {
// for negative indexes
end = length + pair.getValue();
}
// if the given index is in range
if(index >= start && index <= end ) {
return true;
}
}
return false;
}
}
| 35,927 | 35.773797 | 92 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/SHA256MaskFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.DataMask;
import org.apache.orc.TypeDescription;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
* <p>
* Masking strategy that masks String, Varchar, Char and Binary types
* as SHA 256 hash.
* </p>
* <p>
* <b>For String type:</b>
* All string type of any length will be converted to 64 character length
* SHA256 hash encoded in hexadecimal.
* </p>
* <p>
* <b>For Varchar type:</b>
* For Varchar type, max-length property will be honored i.e.
* if the length is less than max-length then the SHA256 hash will be truncated
* to max-length. If max-length is greater than 64 then the output is the
* sha256 length, which is 64.
* </p>
* <p>
* <b>For Char type:</b>
* For Char type, the length of mask will always be equal to specified
* max-length. If the given length (max-length) is less than SHA256 hash
* length (64) the mask will be truncated.
* If the given length (max-length) is greater than SHA256 hash length (64)
* then the mask will be padded by blank spaces.
* </p>
* <p>
* <b>For Binary type:</b>
* All Binary type of any length will be converted to 32 byte length SHA256
* hash.
* </p>
*/
public class SHA256MaskFactory extends MaskFactory {
private final MessageDigest md;
SHA256MaskFactory() {
super();
try {
md = MessageDigest.getInstance("SHA-256");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
private static final char[] DIGITS = {
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
};
public static String printHexBinary(byte[] data) {
final char[] out = new char[data.length << 1];
for (int i = 0, j = 0; i < data.length; i++) {
out[j++] = DIGITS[(0xF0 & data[i]) >>> 4];
out[j++] = DIGITS[0x0F & data[i]];
}
return new String(out);
}
/**
   * Mask a string by replacing it with the hex encoded SHA-256 hash of its
   * bytes, honoring the varchar/char length rules described on this class.
*
* @param source the source column vector
* @param row the value index
* @param target the target column vector
* @param schema schema
*/
void maskString(final BytesColumnVector source, final int row,
final BytesColumnVector target, final TypeDescription schema) {
// take SHA-256 Hash and convert to HEX
md.update(source.vector[row], source.start[row], source.length[row]);
byte[] hash = printHexBinary(md.digest()).getBytes(StandardCharsets.UTF_8);
int targetLength = hash.length;
switch (schema.getCategory()) {
case VARCHAR: {
        /* truncate the hash if the varchar max-length is less than the hash
         * length; if the max-length is greater than the hash length (64
         * bytes), the full 64-byte hash is always used.
*/
if (schema.getMaxLength() < hash.length) {
targetLength = schema.getMaxLength();
}
break;
}
case CHAR: {
/* for char the length is always constant */
targetLength = schema.getMaxLength();
        /* pad the hash with blank chars if the target length exceeds the hash length */
        if (targetLength > hash.length) {
          byte[] tmp = Arrays.copyOf(hash, targetLength);
          Arrays.fill(tmp, hash.length, tmp.length, (byte) ' ');
hash = tmp;
}
break;
}
default: {
break;
}
}
target.vector[row] = hash;
target.start[row] = 0;
target.length[row] = targetLength;
}
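  // Hedged illustration (added; not part of the original file): any string
  // value becomes the 64-character hex form of its SHA-256 digest; declared
  // as varchar(16) only the first 16 hex characters are kept, and declared as
  // char(80) the 64-character digest is padded with spaces up to length 80.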
/**
   * Helper function to mask binary data with its SHA-256 hash.
*
* @param source the source data
* @param row the row that we are translating
* @param target the output data
*/
void maskBinary(final BytesColumnVector source, final int row,
final BytesColumnVector target) {
    // take the SHA-256 hash of just this value's bytes and keep it as binary
    md.update(source.vector[row], source.start[row], source.length[row]);
    byte[] hash = md.digest();
int targetLength = hash.length;
target.vector[row] = hash;
target.start[row] = 0;
target.length[row] = targetLength;
}
@Override
protected DataMask buildBinaryMask(TypeDescription schema) {
return new BinaryMask();
}
@Override
protected DataMask buildBooleanMask(TypeDescription schema) {
return new NullifyMask();
}
@Override
protected DataMask buildLongMask(TypeDescription schema) {
return new NullifyMask();
}
@Override
protected DataMask buildDecimalMask(TypeDescription schema) {
return new NullifyMask();
}
@Override
protected DataMask buildDoubleMask(TypeDescription schema) {
return new NullifyMask();
}
@Override
protected DataMask buildStringMask(final TypeDescription schema) {
return new StringMask(schema);
}
@Override
protected DataMask buildDateMask(TypeDescription schema) {
return new NullifyMask();
}
@Override
protected DataMask buildTimestampMask(TypeDescription schema) {
return new NullifyMask();
}
/**
* Data mask for String, Varchar and Char types.
*/
class StringMask implements DataMask {
final TypeDescription schema;
/* create an instance */
StringMask(TypeDescription schema) {
super();
this.schema = schema;
}
/**
* Mask the given range of values
*
* @param original the original input data
* @param masked the masked output data
* @param start the first data element to mask
* @param length the number of data elements to mask
*/
@Override
public void maskData(final ColumnVector original, final ColumnVector masked,
final int start, final int length) {
final BytesColumnVector target = (BytesColumnVector) masked;
final BytesColumnVector source = (BytesColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.isNull[0] = source.isNull[0];
if (target.noNulls || !target.isNull[0]) {
maskString(source, 0, target, schema);
}
} else {
for (int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (target.noNulls || !target.isNull[r]) {
maskString(source, r, target, schema);
}
}
}
}
}
/**
* Mask for binary data
*/
class BinaryMask implements DataMask {
/* create an instance */
BinaryMask() {
super();
}
/**
* Mask the given range of values
*
* @param original the original input data
* @param masked the masked output data
* @param start the first data element to mask
* @param length the number of data elements to mask
*/
@Override
public void maskData(final ColumnVector original, final ColumnVector masked,
final int start, final int length) {
final BytesColumnVector target = (BytesColumnVector) masked;
final BytesColumnVector source = (BytesColumnVector) original;
target.noNulls = original.noNulls;
target.isRepeating = original.isRepeating;
if (original.isRepeating) {
target.isNull[0] = source.isNull[0];
if (target.noNulls || !target.isNull[0]) {
maskBinary(source, 0, target);
}
} else {
for (int r = start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (target.noNulls || !target.isNull[r]) {
maskBinary(source, r, target);
}
}
}
}
}
}
| 8,722 | 28.37037 | 80 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/StructIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.orc.DataMask;
/**
* A data mask for struct types that applies the given masks to its
* children, but doesn't mask at this level.
*/
public class StructIdentity implements DataMask {
private final DataMask[] children;
StructIdentity(DataMask[] children) {
this.children = children;
}
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
StructColumnVector source = (StructColumnVector) original;
StructColumnVector target = (StructColumnVector) masked;
target.isRepeating = source.isRepeating;
target.noNulls = source.noNulls;
if (source.isRepeating) {
target.isNull[0] = source.isNull[0];
if (source.noNulls || !source.isNull[0]) {
for (int c = 0; c < children.length; ++c) {
children[c].maskData(source.fields[c], target.fields[c], 0, 1);
}
}
} else if (source.noNulls) {
for (int c = 0; c < children.length; ++c) {
children[c].maskData(source.fields[c], target.fields[c], start, length);
}
} else {
// process the children in runs of non-null values
int batchStart = start;
while (batchStart < start + length) {
int r = batchStart;
while (r < start + length && !source.isNull[r]) {
r += 1;
}
if (r != batchStart) {
for(int c=0; c < children.length; ++c) {
children[c].maskData(source.fields[c], target.fields[c],
batchStart, r - batchStart);
}
}
batchStart = r;
while (batchStart < start + length && source.isNull[batchStart]) {
batchStart += 1;
}
}
}
}
}
| 2,685 | 34.813333 | 80 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/TimestampIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.orc.DataMask;
class TimestampIdentity implements DataMask {
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
TimestampColumnVector target = (TimestampColumnVector) masked;
TimestampColumnVector source = (TimestampColumnVector) original;
target.noNulls = source.noNulls;
target.isRepeating = source.isRepeating;
if (original.isRepeating) {
target.time[0] = source.time[0];
target.nanos[0] = source.nanos[0];
target.isNull[0] = source.isNull[0];
} else if (source.noNulls) {
for(int r = start; r < start + length; ++r) {
target.time[r] = source.time[r];
target.nanos[r] = source.nanos[r];
}
} else {
for(int r = start; r < start + length; ++r) {
target.time[r] = source.time[r];
target.nanos[r] = source.nanos[r];
target.isNull[r] = source.isNull[r];
}
}
}
}
| 1,941 | 35.641509 | 77 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/mask/UnionIdentity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.orc.DataMask;
/**
* A data mask for union types that applies the given masks to its
* children, but doesn't mask at this level.
*/
public class UnionIdentity implements DataMask {
private final DataMask[] children;
UnionIdentity(DataMask[] children) {
this.children = children;
}
@Override
public void maskData(ColumnVector original, ColumnVector masked, int start,
int length) {
UnionColumnVector source = (UnionColumnVector) original;
UnionColumnVector target = (UnionColumnVector) masked;
target.isRepeating = source.isRepeating;
target.noNulls = source.noNulls;
if (source.isRepeating) {
target.isNull[0] = source.isNull[0];
if (source.noNulls || !source.isNull[0]) {
int tag = source.tags[0];
target.tags[0] = tag;
children[tag].maskData(source.fields[tag], target.fields[tag], 0, 1);
}
} else if (source.noNulls) {
for (int r = start; r < start + length; ++r) {
int tag = source.tags[r];
target.tags[r] = tag;
children[tag].maskData(source.fields[tag], target.fields[tag], r, 1);
}
} else {
for(int r= start; r < start + length; ++r) {
target.isNull[r] = source.isNull[r];
if (!source.isNull[r]) {
int tag = source.tags[r];
target.tags[r] = tag;
children[tag].maskData(source.fields[tag], target.fields[tag], r, 1);
}
}
}
}
}
| 2,435 | 35.358209 | 79 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/ReaderEncryption.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.BufferChunk;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.KeyProvider;
import org.apache.orc.impl.MaskDescriptionImpl;
import java.io.IOException;
import java.security.SecureRandom;
import java.util.List;
public class ReaderEncryption {
private final KeyProvider keyProvider;
private final ReaderEncryptionKey[] keys;
private final MaskDescriptionImpl[] masks;
private final ReaderEncryptionVariant[] variants;
// Mapping from each column to the next variant to try for that column.
  // A null entry means the column is not encrypted or no accessible variant remains.
private final ReaderEncryptionVariant[] columnVariants;
public ReaderEncryption() {
keyProvider = null;
keys = new ReaderEncryptionKey[0];
masks = new MaskDescriptionImpl[0];
variants = new ReaderEncryptionVariant[0];
columnVariants = null;
}
public ReaderEncryption(OrcProto.Footer footer,
TypeDescription schema,
long stripeStatisticsOffset,
BufferChunk serializedTail,
List<StripeInformation> stripes,
KeyProvider provider,
Configuration conf) throws IOException {
if (footer == null || !footer.hasEncryption()) {
keyProvider = null;
keys = new ReaderEncryptionKey[0];
masks = new MaskDescriptionImpl[0];
variants = new ReaderEncryptionVariant[0];
columnVariants = null;
} else {
keyProvider = provider != null ? provider :
CryptoUtils.getKeyProvider(conf, new SecureRandom());
OrcProto.Encryption encrypt = footer.getEncryption();
masks = new MaskDescriptionImpl[encrypt.getMaskCount()];
for(int m=0; m < masks.length; ++m) {
masks[m] = new MaskDescriptionImpl(m, encrypt.getMask(m));
}
keys = new ReaderEncryptionKey[encrypt.getKeyCount()];
for(int k=0; k < keys.length; ++k) {
keys[k] = new ReaderEncryptionKey(encrypt.getKey(k));
}
variants = new ReaderEncryptionVariant[encrypt.getVariantsCount()];
long offset = stripeStatisticsOffset;
for(int v=0; v < variants.length; ++v) {
OrcProto.EncryptionVariant variant = encrypt.getVariants(v);
variants[v] = new ReaderEncryptionVariant(keys[variant.getKey()], v,
variant, schema, stripes, offset, serializedTail, keyProvider);
offset += variants[v].getStripeStatisticsLength();
}
columnVariants = new ReaderEncryptionVariant[schema.getMaximumId() + 1];
for (ReaderEncryptionVariant variant : variants) {
TypeDescription root = variant.getRoot();
for (int c = root.getId(); c <= root.getMaximumId(); ++c) {
// set the variant if it is the first one that we've found
if (columnVariants[c] == null) {
columnVariants[c] = variant;
}
}
}
}
}
public MaskDescriptionImpl[] getMasks() {
return masks;
}
public ReaderEncryptionKey[] getKeys() {
return keys;
}
public ReaderEncryptionVariant[] getVariants() {
return variants;
}
/**
* Find the next possible variant in this file for the given column.
* @param column the column to find a variant for
* @param lastVariant the previous variant that we looked at
* @return the next variant or null if there isn't one
*/
private ReaderEncryptionVariant findNextVariant(int column,
int lastVariant) {
for(int v = lastVariant + 1; v < variants.length; ++v) {
TypeDescription root = variants[v].getRoot();
if (root.getId() <= column && column <= root.getMaximumId()) {
return variants[v];
}
}
return null;
}
/**
* Get the variant for a given column that the user has access to.
* If we haven't tried a given key, try to decrypt this variant's footer key
* to see if the KeyProvider will give it to us. If not, continue to the
* next variant.
* @param column the column id
* @return the encryption variant or null if there isn't one
*/
public ReaderEncryptionVariant getVariant(int column) {
if (columnVariants == null) {
return null;
} else {
while (columnVariants[column] != null &&
!columnVariants[column].getKeyDescription().isAvailable()) {
if (keyProvider != null) {
columnVariants[column] =
findNextVariant(column, columnVariants[column].getVariantId());
}
}
return columnVariants[column];
}
}
}
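// Editor's illustrative sketch (not part of the original source): a hedged example of how a
// caller might resolve the encryption variant for a column; variable names are hypothetical.
//
//   ReaderEncryption encryption = ...;  // built from the file footer while opening the reader
//   ReaderEncryptionVariant variant = encryption.getVariant(columnId);
//   if (variant == null) {
//     // the column is unencrypted, or no variant with an accessible key was found
//   } else {
//     java.security.Key stripeKey = variant.getStripeKey(stripeIndex); // may throw IOException
//   }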
| 5,597 | 36.57047 | 78 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/ReaderEncryptionKey.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.EncryptionKey;
import org.apache.orc.OrcProto;
import org.apache.orc.impl.HadoopShims;
import org.jetbrains.annotations.NotNull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* This tracks the keys for reading encrypted columns.
*/
public class ReaderEncryptionKey implements EncryptionKey {
private final String name;
private final int version;
private final EncryptionAlgorithm algorithm;
private final List<ReaderEncryptionVariant> roots = new ArrayList<>();
/**
* Store the state of whether we've tried to decrypt a local key using this
* key or not. If it fails the first time, we assume the user doesn't have
* permission and move on. However, we don't want to retry the same failed
* key over and over again.
*/
public enum State {
UNTRIED,
FAILURE,
SUCCESS
}
private State state = State.UNTRIED;
public ReaderEncryptionKey(OrcProto.EncryptionKey key) {
name = key.getKeyName();
version = key.getKeyVersion();
algorithm =
EncryptionAlgorithm.fromSerialization(key.getAlgorithm().getNumber());
}
@Override
public String getKeyName() {
return name;
}
@Override
public int getKeyVersion() {
return version;
}
@Override
public EncryptionAlgorithm getAlgorithm() {
return algorithm;
}
@Override
public ReaderEncryptionVariant[] getEncryptionRoots() {
return roots.toArray(new ReaderEncryptionVariant[0]);
}
public HadoopShims.KeyMetadata getMetadata() {
return new HadoopShims.KeyMetadata(name, version, algorithm);
}
public State getState() {
return state;
}
public void setFailure() {
state = State.FAILURE;
}
public void setSuccess() {
if (state == State.FAILURE) {
throw new IllegalStateException("Key " + name + " had already failed.");
}
state = State.SUCCESS;
}
void addVariant(ReaderEncryptionVariant newVariant) {
roots.add(newVariant);
}
@Override
public boolean equals(Object other) {
if (other == null || getClass() != other.getClass()) {
return false;
} else if (other == this) {
return true;
} else {
return compareTo((EncryptionKey) other) == 0;
}
}
@Override
public int hashCode() {
return name.hashCode() * 127 + version * 7 + algorithm.hashCode();
}
@Override
public int compareTo(@NotNull EncryptionKey other) {
int result = name.compareTo(other.getKeyName());
if (result == 0) {
result = Integer.compare(version, other.getKeyVersion());
}
return result;
}
@Override
public String toString() {
return name + "@" + version + " w/ " + algorithm;
}
@Override
public boolean isAvailable() {
if (getState() == ReaderEncryptionKey.State.SUCCESS) {
return true;
} else if (getState() == ReaderEncryptionKey.State.UNTRIED &&
roots.size() > 0) {
// Check to see if we can decrypt the footer key of the first variant.
try {
return roots.get(0).getFileFooterKey() != null;
} catch (IOException ioe) {
setFailure();
}
}
return false;
}
}
| 4,052 | 26.02 | 78 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/ReaderEncryptionVariant.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader;
import org.apache.hadoop.io.BytesWritable;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.EncryptionKey;
import org.apache.orc.EncryptionVariant;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.BufferChunk;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.InStream;
import org.apache.orc.impl.KeyProvider;
import org.apache.orc.impl.LocalKey;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.impl.StripeStatisticsImpl;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.security.Key;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Information about an encrypted column.
*/
public class ReaderEncryptionVariant implements EncryptionVariant {
private static final Logger LOG =
LoggerFactory.getLogger(ReaderEncryptionVariant.class);
private final KeyProvider provider;
private final ReaderEncryptionKey key;
private final TypeDescription column;
private final int variantId;
private final BufferChunk tailBuffer;
private final List<OrcProto.Stream> stripeStats;
private final LocalKey[] localKeys;
private final LocalKey footerKey;
private final int stripeCount;
private final long stripeStatsOffset;
/**
* Create a reader's view of an encryption variant.
* @param key the encryption key description
   * @param variantId the id of the variant (0..N-1)
* @param proto the serialized description of the variant
* @param schema the file schema
* @param stripes the stripe information
* @param stripeStatsOffset the offset of the stripe statistics
* @param tailBuffer the serialized file tail
* @param provider the key provider
*/
ReaderEncryptionVariant(ReaderEncryptionKey key,
int variantId,
OrcProto.EncryptionVariant proto,
TypeDescription schema,
List<StripeInformation> stripes,
long stripeStatsOffset,
BufferChunk tailBuffer,
KeyProvider provider) {
this.key = key;
this.variantId = variantId;
this.provider = provider;
this.column = proto == null || !proto.hasRoot() ? schema :
schema.findSubtype(proto.getRoot());
this.localKeys = new LocalKey[stripes.size()];
HashMap<BytesWritable, LocalKey> cache = new HashMap<>();
stripeCount = stripes.size();
this.stripeStatsOffset = stripeStatsOffset;
if (proto != null && proto.hasEncryptedKey()) {
for (int s = 0; s < localKeys.length; ++s) {
StripeInformation stripe = stripes.get(s);
localKeys[s] = getCachedKey(cache, key.getAlgorithm(),
stripe.getEncryptedLocalKeys()[variantId]);
}
footerKey = getCachedKey(cache, key.getAlgorithm(),
proto.getEncryptedKey().toByteArray());
key.addVariant(this);
stripeStats = proto.getStripeStatisticsList();
this.tailBuffer = tailBuffer;
} else {
footerKey = null;
stripeStats = null;
this.tailBuffer = null;
}
}
@Override
public ReaderEncryptionKey getKeyDescription() {
return key;
}
@Override
public TypeDescription getRoot() {
return column;
}
@Override
public int getVariantId() {
return variantId;
}
/**
* Deduplicate the local keys so that we only decrypt each local key once.
   * @param cache the cache to use
   * @param algorithm the encryption algorithm for the local key
   * @param encrypted the encrypted key
* @return the local key
*/
private static LocalKey getCachedKey(Map<BytesWritable, LocalKey> cache,
EncryptionAlgorithm algorithm,
byte[] encrypted) {
// wrap byte array in BytesWritable to get equality and hash
BytesWritable wrap = new BytesWritable(encrypted);
LocalKey result = cache.get(wrap);
if (result == null) {
result = new LocalKey(algorithm, null, encrypted);
cache.put(wrap, result);
}
return result;
}
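  // Editor's note (worked example, not part of the original source): if two stripes carry
  // byte-identical encrypted local keys, both lookups wrap them in equal BytesWritable
  // instances and return the same cached LocalKey, so that key is decrypted at most once.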
private Key getDecryptedKey(LocalKey localKey) throws IOException {
Key result = localKey.getDecryptedKey();
if (result == null) {
switch (this.key.getState()) {
case UNTRIED:
try {
result = provider.decryptLocalKey(key.getMetadata(),
localKey.getEncryptedKey());
} catch (IOException ioe) {
LOG.info("Can't decrypt using key {}", key);
}
if (result != null) {
localKey.setDecryptedKey(result);
key.setSuccess();
} else {
key.setFailure();
}
break;
case SUCCESS:
result = provider.decryptLocalKey(key.getMetadata(),
localKey.getEncryptedKey());
if (result == null) {
throw new IOException("Can't decrypt local key " + key);
}
localKey.setDecryptedKey(result);
break;
case FAILURE:
return null;
}
}
return result;
}
@Override
public Key getFileFooterKey() throws IOException {
return (key == null || provider == null) ? null : getDecryptedKey(footerKey);
}
@Override
public Key getStripeKey(long stripe) throws IOException {
return (key == null || provider == null) ? null : getDecryptedKey(localKeys[(int) stripe]);
}
@Override
public boolean equals(Object other) {
if (other == null || other.getClass() != getClass()) {
return false;
} else {
return compareTo((EncryptionVariant) other) == 0;
}
}
@Override
public int hashCode() {
return key.hashCode() * 127 + column.getId();
}
@Override
public int compareTo(@NotNull EncryptionVariant other) {
if (other == this) {
return 0;
}
EncryptionKey otherKey = other.getKeyDescription();
if (key == otherKey) {
return Integer.compare(column.getId(), other.getRoot().getId());
} else if (key == null) {
return -1;
} else if (otherKey == null) {
return 1;
} else {
return key.compareTo(other.getKeyDescription());
}
}
public long getStripeStatisticsLength() {
long result = 0;
for(OrcProto.Stream stream: stripeStats) {
result += stream.getLength();
}
return result;
}
/**
* Decrypt the raw data and return the list of the stripe statistics for this
* variant.
* @param columns true for the columns that should be included
   * @param compression the compression options
   * @param reader the file reader, used for its proleptic Gregorian calendar settings
   * @return the stripe statistics for this variant.
*/
public List<StripeStatistics> getStripeStatistics(boolean[] columns,
InStream.StreamOptions compression,
ReaderImpl reader
) throws IOException {
StripeStatisticsImpl[] result = new StripeStatisticsImpl[stripeCount];
for(int s=0; s < result.length; ++s) {
result[s] = new StripeStatisticsImpl(column, reader.writerUsedProlepticGregorian(),
reader.getConvertToProlepticGregorian());
}
// create the objects
long offset = stripeStatsOffset;
Key fileKey = getFileFooterKey();
if (fileKey == null) {
throw new IOException("Can't get file footer key for " + key.getKeyName());
}
int root = column.getId();
for(OrcProto.Stream stream: stripeStats){
long length = stream.getLength();
int column = stream.getColumn();
OrcProto.Stream.Kind kind = stream.getKind();
if (kind == OrcProto.Stream.Kind.STRIPE_STATISTICS &&
(columns == null || columns[column])) {
byte[] iv = new byte[key.getAlgorithm().getIvLength()];
CryptoUtils.modifyIvForStream(column, kind, stripeCount + 1).accept(iv);
InStream.StreamOptions options = new InStream.StreamOptions(compression)
.withEncryption(key.getAlgorithm(), fileKey, iv);
OrcProto.ColumnarStripeStatistics stat =
OrcProto.ColumnarStripeStatistics.parseFrom(
InStream.createCodedInputStream(
InStream.create(stream, tailBuffer, offset,
length, options)));
for(int s=0; s < result.length; ++s) {
result[s].updateColumn(column - root, stat.getColStats(s));
}
}
offset += length;
}
return Arrays.asList(result);
}
}
| 9,496 | 33.787546 | 95 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/StripePlanner.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader;
import com.google.protobuf.CodedInputStream;
import org.apache.orc.DataReader;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.BufferChunk;
import org.apache.orc.impl.BufferChunkList;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.InStream;
import org.apache.orc.impl.OrcIndex;
import org.apache.orc.impl.PhysicalFsWriter;
import org.apache.orc.impl.RecordReaderUtils;
import org.apache.orc.impl.StreamName;
import org.apache.orc.impl.reader.tree.TypeReader;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.Key;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* This class handles parsing the stripe information and handling the necessary
* filtering and selection.
* <p>
* It supports:
* <ul>
* <li>column projection</li>
* <li>row group selection</li>
* <li>encryption</li>
* </ul>
*/
public class StripePlanner {
private static final Logger LOG = LoggerFactory.getLogger(StripePlanner.class);
// global information
private final TypeDescription schema;
private final OrcFile.WriterVersion version;
private final OrcProto.ColumnEncoding[] encodings;
private final ReaderEncryption encryption;
private final DataReader dataReader;
private final boolean ignoreNonUtf8BloomFilter;
private final long maxBufferSize;
// specific to the current stripe
private String writerTimezone;
private long currentStripeId;
private long originalStripeId;
private final Map<StreamName, StreamInformation> streams = new HashMap<>();
// the index streams sorted by offset
private final List<StreamInformation> indexStreams = new ArrayList<>();
// the data streams sorted by offset
private final List<StreamInformation> dataStreams = new ArrayList<>();
private final OrcProto.Stream.Kind[] bloomFilterKinds;
// does each column have a null stream?
private final boolean[] hasNull;
// identifies the filter column ids whose streams should always be read
private final Set<Integer> filterColIds;
/**
* Create a stripe parser.
* @param schema the file schema
* @param encryption the encryption information
* @param dataReader the underlying data reader
* @param version the file writer version
* @param ignoreNonUtf8BloomFilter ignore old non-utf8 bloom filters
* @param maxBufferSize the largest single buffer to use
* @param filterColIds Column Ids that identify the filter columns
*/
public StripePlanner(TypeDescription schema,
ReaderEncryption encryption,
DataReader dataReader,
OrcFile.WriterVersion version,
boolean ignoreNonUtf8BloomFilter,
long maxBufferSize,
Set<Integer> filterColIds) {
this.schema = schema;
this.version = version;
encodings = new OrcProto.ColumnEncoding[schema.getMaximumId()+1];
this.encryption = encryption;
this.dataReader = dataReader;
this.ignoreNonUtf8BloomFilter = ignoreNonUtf8BloomFilter;
bloomFilterKinds = new OrcProto.Stream.Kind[schema.getMaximumId() + 1];
hasNull = new boolean[schema.getMaximumId() + 1];
this.maxBufferSize = maxBufferSize;
this.filterColIds = filterColIds;
}
public StripePlanner(TypeDescription schema,
ReaderEncryption encryption,
DataReader dataReader,
OrcFile.WriterVersion version,
boolean ignoreNonUtf8BloomFilter,
long maxBufferSize) {
this(schema, encryption, dataReader, version, ignoreNonUtf8BloomFilter, maxBufferSize,
Collections.emptySet());
}
public StripePlanner(StripePlanner old) {
this(old.schema, old.encryption, old.dataReader, old.version,
old.ignoreNonUtf8BloomFilter, old.maxBufferSize, old.filterColIds);
}
/**
* Parse a new stripe. Resets the current stripe state.
* @param stripe the new stripe
* @param columnInclude an array with true for each column to read
* @return this for method chaining
*/
public StripePlanner parseStripe(StripeInformation stripe,
boolean[] columnInclude) throws IOException {
OrcProto.StripeFooter footer = dataReader.readStripeFooter(stripe);
currentStripeId = stripe.getStripeId();
originalStripeId = stripe.getEncryptionStripeId();
writerTimezone = footer.getWriterTimezone();
streams.clear();
dataStreams.clear();
indexStreams.clear();
buildEncodings(footer, columnInclude);
findStreams(stripe.getOffset(), footer, columnInclude);
// figure out whether each column has null values in this stripe
Arrays.fill(hasNull, false);
for(StreamInformation stream: dataStreams) {
if (stream.kind == OrcProto.Stream.Kind.PRESENT) {
hasNull[stream.column] = true;
}
}
return this;
}
/**
* Read the stripe data from the file.
* @param index null for no row filters or the index for filtering
* @param rowGroupInclude null for all of the rows or an array with boolean
* for each row group in the current stripe.
* @param forceDirect should direct buffers be created?
* @param readPhase influences the columns that are read e.g. if readPhase = LEADERS then only
* the data required for FILTER columns is read
* @return the buffers that were read
*/
public BufferChunkList readData(OrcIndex index,
boolean[] rowGroupInclude,
boolean forceDirect,
TypeReader.ReadPhase readPhase) throws IOException {
BufferChunkList chunks = (index == null || rowGroupInclude == null)
? planDataReading(readPhase) : planPartialDataReading(index, rowGroupInclude,
readPhase);
dataReader.readFileData(chunks, forceDirect);
return chunks;
}
public BufferChunkList readFollowData(OrcIndex index,
boolean[] rowGroupInclude,
int rgIdx,
boolean forceDirect)
throws IOException {
BufferChunkList chunks = (index == null || rowGroupInclude == null)
? planDataReading(TypeReader.ReadPhase.FOLLOWERS)
: planPartialDataReading(index, rowGroupInclude, rgIdx, TypeReader.ReadPhase.FOLLOWERS);
dataReader.readFileData(chunks, forceDirect);
return chunks;
}
public String getWriterTimezone() {
return writerTimezone;
}
/**
* Get the stream for the given name.
* It is assumed that the name does <b>not</b> have the encryption set,
   * because the TreeReaders don't know if they are reading encrypted data.
* Assumes that readData has already been called on this stripe.
* @param name the column/kind of the stream
* @return a new stream with the options set correctly
*/
public InStream getStream(StreamName name) throws IOException {
StreamInformation stream = streams.get(name);
return stream == null ? null
: InStream.create(name, stream.firstChunk, stream.offset, stream.length,
getStreamOptions(stream.column, stream.kind));
}
/**
* Release all of the buffers for the current stripe.
*/
public void clearStreams() {
if (dataReader.isTrackingDiskRanges()) {
for (StreamInformation stream : indexStreams) {
stream.releaseBuffers(dataReader);
}
for (StreamInformation stream : dataStreams) {
stream.releaseBuffers(dataReader);
}
}
indexStreams.clear();
dataStreams.clear();
streams.clear();
}
/**
* Get the stream options for a stream in a stripe.
* @param column the column we are reading
* @param kind the stream kind we are reading
* @return a new stream options to read the given column
*/
private InStream.StreamOptions getStreamOptions(int column,
OrcProto.Stream.Kind kind
) throws IOException {
ReaderEncryptionVariant variant = encryption.getVariant(column);
InStream.StreamOptions compression = dataReader.getCompressionOptions();
if (variant == null) {
return compression;
} else {
EncryptionAlgorithm algorithm = variant.getKeyDescription().getAlgorithm();
byte[] iv = new byte[algorithm.getIvLength()];
Key key = variant.getStripeKey(currentStripeId);
CryptoUtils.modifyIvForStream(column, kind, originalStripeId).accept(iv);
return new InStream.StreamOptions(compression)
.withEncryption(algorithm, key, iv);
}
}
public OrcProto.ColumnEncoding getEncoding(int column) {
return encodings[column];
}
private void buildEncodings(OrcProto.StripeFooter footer,
boolean[] columnInclude) {
for(int c=0; c < encodings.length; ++c) {
if (columnInclude == null || columnInclude[c]) {
ReaderEncryptionVariant variant = encryption.getVariant(c);
if (variant == null) {
encodings[c] = footer.getColumns(c);
} else {
int subColumn = c - variant.getRoot().getId();
encodings[c] = footer.getEncryption(variant.getVariantId())
.getEncoding(subColumn);
}
}
}
}
/**
* For each stream, decide whether to include it in the list of streams.
* @param offset the position in the file for this stream
* @param columnInclude which columns are being read
* @param stream the stream to consider
   * @param area only streams in this area will be included
* @param variant the variant being read
* @return the offset for the next stream
*/
private long handleStream(long offset,
boolean[] columnInclude,
OrcProto.Stream stream,
StreamName.Area area,
ReaderEncryptionVariant variant) {
int column = stream.getColumn();
if (stream.hasKind()) {
OrcProto.Stream.Kind kind = stream.getKind();
if (StreamName.getArea(kind) != area || kind == OrcProto.Stream.Kind.ENCRYPTED_INDEX ||
kind == OrcProto.Stream.Kind.ENCRYPTED_DATA) {
// Ignore the placeholders that shouldn't count toward moving the
// offsets.
return 0;
}
if (columnInclude[column] && encryption.getVariant(column) == variant) {
// Ignore any broken bloom filters unless the user forced us to use
// them.
if (kind != OrcProto.Stream.Kind.BLOOM_FILTER ||
!ignoreNonUtf8BloomFilter ||
!hadBadBloomFilters(schema.findSubtype(column).getCategory(),
version)) {
// record what kind of bloom filters we are using
if (kind == OrcProto.Stream.Kind.BLOOM_FILTER_UTF8 ||
kind == OrcProto.Stream.Kind.BLOOM_FILTER) {
bloomFilterKinds[column] = kind;
}
StreamInformation info =
new StreamInformation(kind, column, offset, stream.getLength());
switch (StreamName.getArea(kind)) {
case DATA:
dataStreams.add(info);
break;
case INDEX:
indexStreams.add(info);
break;
default:
}
streams.put(new StreamName(column, kind), info);
}
}
}
return stream.getLength();
}
/**
* Find the complete list of streams.
   * The order in which currentOffset is advanced must be consistent with the
   * order in which {@link PhysicalFsWriter#finalizeStripe} writes the streams.
* @param streamStart the starting offset of streams in the file
* @param footer the footer for the stripe
* @param columnInclude which columns are being read
*/
private void findStreams(long streamStart,
OrcProto.StripeFooter footer,
boolean[] columnInclude) throws IOException {
long currentOffset = streamStart;
Arrays.fill(bloomFilterKinds, null);
// +-----------------+---------------+-----------------+---------------+
// | | | | |
// | unencrypted | encrypted | unencrypted | encrypted |
// | index | index | data | data |
// | | | | |
// +-----------------+---------------+-----------------+---------------+
    // Storage layout of index and data, so we need to find the streams in this order.
    //
    // find index streams, unencrypted first and then encrypted
    currentOffset = findStreamsByArea(currentOffset, footer, StreamName.Area.INDEX, columnInclude);
    // find data streams, unencrypted first and then encrypted
    findStreamsByArea(currentOffset, footer, StreamName.Area.DATA, columnInclude);
}
private long findStreamsByArea(long currentOffset,
OrcProto.StripeFooter footer,
StreamName.Area area,
boolean[] columnInclude) {
// find unencrypted streams
for(OrcProto.Stream stream: footer.getStreamsList()) {
currentOffset += handleStream(currentOffset, columnInclude, stream, area, null);
}
// find encrypted streams
for(ReaderEncryptionVariant variant: encryption.getVariants()) {
int variantId = variant.getVariantId();
OrcProto.StripeEncryptionVariant stripeVariant =
footer.getEncryption(variantId);
for(OrcProto.Stream stream: stripeVariant.getStreamsList()) {
currentOffset += handleStream(currentOffset, columnInclude, stream, area, variant);
}
}
return currentOffset;
}
/**
* Read and parse the indexes for the current stripe.
* @param sargColumns the columns we can use bloom filters for
* @param output an OrcIndex to reuse
* @return the indexes for the required columns
*/
public OrcIndex readRowIndex(boolean[] sargColumns,
OrcIndex output) throws IOException {
int typeCount = schema.getMaximumId() + 1;
if (output == null) {
output = new OrcIndex(new OrcProto.RowIndex[typeCount],
new OrcProto.Stream.Kind[typeCount],
new OrcProto.BloomFilterIndex[typeCount]);
}
System.arraycopy(bloomFilterKinds, 0, output.getBloomFilterKinds(), 0,
bloomFilterKinds.length);
BufferChunkList ranges = planIndexReading(sargColumns);
dataReader.readFileData(ranges, false);
OrcProto.RowIndex[] indexes = output.getRowGroupIndex();
OrcProto.BloomFilterIndex[] blooms = output.getBloomFilterIndex();
for(StreamInformation stream: indexStreams) {
int column = stream.column;
if (stream.firstChunk != null) {
CodedInputStream data = InStream.createCodedInputStream(InStream.create(
"index", stream.firstChunk, stream.offset,
stream.length, getStreamOptions(column, stream.kind)));
switch (stream.kind) {
case ROW_INDEX:
indexes[column] = OrcProto.RowIndex.parseFrom(data);
break;
case BLOOM_FILTER:
case BLOOM_FILTER_UTF8:
if (sargColumns != null && sargColumns[column]) {
blooms[column] = OrcProto.BloomFilterIndex.parseFrom(data);
}
break;
default:
break;
}
}
}
return output;
}
private void addChunk(BufferChunkList list, StreamInformation stream,
long offset, long length) {
while (length > 0) {
long thisLen = Math.min(length, maxBufferSize);
BufferChunk chunk = new BufferChunk(offset, (int) thisLen);
if (stream.firstChunk == null) {
stream.firstChunk = chunk;
}
list.add(chunk);
offset += thisLen;
length -= thisLen;
}
}
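  // Editor's note (worked example, not part of the original source): with maxBufferSize of
  // 128 MiB, a 300 MiB stream starting at offset 0 is planned as three BufferChunks covering
  // [0, 128 MiB), [128 MiB, 256 MiB) and [256 MiB, 300 MiB); stream.firstChunk keeps a
  // reference to the first of them.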
/**
* Plans the list of disk ranges that the given stripe needs to read the
* indexes. All of the positions are relative to the start of the stripe.
* @param bloomFilterColumns true for the columns (indexed by file columns) that
* we need bloom filters for
* @return a list of merged disk ranges to read
*/
private BufferChunkList planIndexReading(boolean[] bloomFilterColumns) {
BufferChunkList result = new BufferChunkList();
for(StreamInformation stream: indexStreams) {
switch (stream.kind) {
case ROW_INDEX:
addChunk(result, stream, stream.offset, stream.length);
break;
case BLOOM_FILTER:
case BLOOM_FILTER_UTF8:
if (bloomFilterColumns[stream.column] &&
bloomFilterKinds[stream.column] == stream.kind) {
addChunk(result, stream, stream.offset, stream.length);
}
break;
default:
// PASS
break;
}
}
return result;
}
/**
* Plans the list of disk ranges that the given stripe needs to read the
* data.
*
* @param readPhase Determines the columns that will be planned.
* @return a list of merged disk ranges to read
*/
private BufferChunkList planDataReading(TypeReader.ReadPhase readPhase) {
BufferChunkList result = new BufferChunkList();
for(StreamInformation stream: dataStreams) {
if (readPhase == TypeReader.ReadPhase.ALL ||
(readPhase == TypeReader.ReadPhase.LEADERS && filterColIds.contains(stream.column)) ||
(readPhase == TypeReader.ReadPhase.FOLLOWERS && !filterColIds.contains(stream.column))) {
addChunk(result, stream, stream.offset, stream.length);
} else {
        // If a filter is present, don't plan the lazy (non-filter) columns here; they are
        // planned later, only if they are needed.
LOG.debug("Skipping planning for lazy column stream {}", stream);
}
}
return result;
}
static boolean hadBadBloomFilters(TypeDescription.Category category,
OrcFile.WriterVersion version) {
switch(category) {
case STRING:
case CHAR:
case VARCHAR:
return !version.includes(OrcFile.WriterVersion.HIVE_12055);
case DECIMAL:
// fixed by ORC-101, but ORC-101 changed stream kind to BLOOM_FILTER_UTF8
return true;
case TIMESTAMP:
return !version.includes(OrcFile.WriterVersion.ORC_135);
default:
return false;
}
}
private static boolean hasSomeRowGroups(boolean[] includedRowGroups) {
for(boolean include: includedRowGroups) {
if (include) {
return true;
}
}
return false;
}
/**
* Plan the ranges of the file that we need to read given the list of
* columns and row groups.
*
* @param index the index to use for offsets
* @param includedRowGroups which row groups are needed
* @param readPhase Determines the columns that will be planned.
* @return the list of disk ranges that will be loaded
*/
private BufferChunkList planPartialDataReading(OrcIndex index,
@NotNull boolean[] includedRowGroups,
TypeReader.ReadPhase readPhase) {
return planPartialDataReading(index, includedRowGroups, 0, readPhase);
}
/**
* Plan the ranges of the file that we need to read given the list of
* columns and row groups.
*
* @param index the index to use for offsets
   * @param includedRowGroups which row groups are needed
   * @param startGroup the first row group to consider
   * @param readPhase determines the columns that will be planned
   * @return the list of disk ranges that will be loaded
*/
private BufferChunkList planPartialDataReading(OrcIndex index,
@NotNull boolean[] includedRowGroups,
int startGroup,
TypeReader.ReadPhase readPhase) {
BufferChunkList result = new BufferChunkList();
if (hasSomeRowGroups(includedRowGroups)) {
InStream.StreamOptions compression = dataReader.getCompressionOptions();
boolean isCompressed = compression.getCodec() != null;
int bufferSize = compression.getBufferSize();
OrcProto.RowIndex[] rowIndex = index.getRowGroupIndex();
for (StreamInformation stream : dataStreams) {
if (readPhase == TypeReader.ReadPhase.ALL ||
(readPhase == TypeReader.ReadPhase.LEADERS && filterColIds.contains(stream.column)) ||
(readPhase == TypeReader.ReadPhase.FOLLOWERS &&
!filterColIds.contains(stream.column))) {
processStream(stream, result, rowIndex, startGroup,
includedRowGroups, isCompressed, bufferSize);
} else {
          // If a filter is present, don't plan the lazy (non-filter) columns here; they are
          // planned later, only if they are needed.
LOG.debug("Skipping planning for column stream {} at level {}", stream, readPhase);
}
}
}
return result;
}
private void processStream(StreamInformation stream,
BufferChunkList result,
OrcProto.RowIndex[] rowIndex,
int startGroup,
boolean[] includedRowGroups,
boolean isCompressed,
int bufferSize) {
if (RecordReaderUtils.isDictionary(stream.kind, encodings[stream.column])) {
addChunk(result, stream, stream.offset, stream.length);
} else {
int column = stream.column;
OrcProto.RowIndex ri = rowIndex[column];
TypeDescription.Category kind = schema.findSubtype(column).getCategory();
long alreadyRead = 0;
for (int group = startGroup; group < includedRowGroups.length; ++group) {
if (includedRowGroups[group]) {
// find the last group that is selected
int endGroup = group;
while (endGroup < includedRowGroups.length - 1 &&
includedRowGroups[endGroup + 1]) {
endGroup += 1;
}
int posn = RecordReaderUtils.getIndexPosition(
encodings[stream.column].getKind(), kind, stream.kind,
isCompressed, hasNull[column]);
long start = Math.max(alreadyRead,
stream.offset + (group == 0 ? 0 : ri.getEntry(group).getPositions(posn)));
long end = stream.offset;
if (endGroup == includedRowGroups.length - 1) {
end += stream.length;
} else {
long nextGroupOffset = ri.getEntry(endGroup + 1).getPositions(posn);
end += RecordReaderUtils.estimateRgEndOffset(isCompressed,
bufferSize, false, nextGroupOffset,
stream.length);
}
if (alreadyRead < end) {
addChunk(result, stream, start, end - start);
alreadyRead = end;
}
group = endGroup;
}
}
}
}
public static class StreamInformation {
public final OrcProto.Stream.Kind kind;
public final int column;
public final long offset;
public final long length;
public BufferChunk firstChunk;
public StreamInformation(OrcProto.Stream.Kind kind, int column, long offset, long length) {
this.kind = kind;
this.column = column;
this.offset = offset;
this.length = length;
}
void releaseBuffers(DataReader reader) {
long end = offset + length;
BufferChunk ptr = firstChunk;
while (ptr != null && ptr.getOffset() < end) {
ByteBuffer buffer = ptr.getData();
if (buffer != null) {
reader.releaseBuffer(buffer);
ptr.setChunk(null);
}
ptr = (BufferChunk) ptr.next;
}
}
}
}
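// Editor's illustrative sketch (not part of the original source): the rough per-stripe call
// sequence a reader might follow; variable names are hypothetical and error handling is omitted.
//
//   StripePlanner planner = new StripePlanner(schema, encryption, dataReader, version,
//       ignoreNonUtf8BloomFilter, maxBufferSize);
//   planner.parseStripe(stripe, columnInclude);
//   OrcIndex index = planner.readRowIndex(sargColumns, null);
//   BufferChunkList data = planner.readData(index, rowGroupInclude, false,
//       TypeReader.ReadPhase.ALL);
//   InStream stream = planner.getStream(new StreamName(columnId, OrcProto.Stream.Kind.DATA));
//   // ... decode the column streams ...
//   planner.clearStreams();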
| 25,208 | 38.636792 | 101 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/tree/BatchReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader.tree;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.PositionProvider;
import org.apache.orc.impl.reader.StripePlanner;
import java.io.IOException;
/**
* The top level interface that the reader uses to read the columns from the
* ORC file.
*/
public abstract class BatchReader {
// The row type reader
public final TypeReader rootType;
protected int vectorColumnCount = -1;
public BatchReader(TypeReader rootType) {
this.rootType = rootType;
}
public abstract void startStripe(StripePlanner planner, TypeReader.ReadPhase readPhase)
throws IOException;
public void setVectorColumnCount(int vectorColumnCount) {
this.vectorColumnCount = vectorColumnCount;
}
/**
* Read the next batch of data from the file.
* @param batch the batch to read into
* @param batchSize the number of rows to read
* @param readPhase defines the read phase
* @throws IOException errors reading the file
*/
public abstract void nextBatch(VectorizedRowBatch batch,
int batchSize,
TypeReader.ReadPhase readPhase) throws IOException;
protected void resetBatch(VectorizedRowBatch batch, int batchSize) {
batch.selectedInUse = false;
batch.size = batchSize;
}
public abstract void skipRows(long rows, TypeReader.ReadPhase readPhase) throws IOException;
public abstract void seek(PositionProvider[] index, TypeReader.ReadPhase readPhase)
throws IOException;
}
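// Editor's illustrative sketch (not part of the original source): a minimal unfiltered read
// loop over one stripe for a concrete BatchReader; the counters are hypothetical.
//
//   batchReader.startStripe(planner, TypeReader.ReadPhase.ALL);
//   long remaining = rowsInStripe;
//   while (remaining > 0) {
//     int n = (int) Math.min(batch.getMaxSize(), remaining);
//     batchReader.nextBatch(batch, n, TypeReader.ReadPhase.ALL);
//     remaining -= n;
//     // consume batch.cols here
//   }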
| 2,378 | 33.985294 | 94 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/tree/PrimitiveBatchReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader.tree;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.PositionProvider;
import org.apache.orc.impl.reader.StripePlanner;
import java.io.IOException;
public class PrimitiveBatchReader extends BatchReader {
public PrimitiveBatchReader(TypeReader rowReader) {
super(rowReader);
}
@Override
public void nextBatch(VectorizedRowBatch batch,
int batchSize,
TypeReader.ReadPhase readPhase) throws IOException {
batch.cols[0].reset();
batch.cols[0].ensureSize(batchSize, false);
rootType.nextVector(batch.cols[0], null, batchSize, batch, readPhase);
resetBatch(batch, batchSize);
}
public void startStripe(StripePlanner planner, TypeReader.ReadPhase readPhase)
throws IOException {
rootType.startStripe(planner, readPhase);
}
public void skipRows(long rows, TypeReader.ReadPhase readPhase) throws IOException {
rootType.skipRows(rows, readPhase);
}
public void seek(PositionProvider[] index, TypeReader.ReadPhase readPhase) throws IOException {
rootType.seek(index, readPhase);
}
}
| 1,974 | 34.909091 | 97 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/tree/StructBatchReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader.tree;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.apache.orc.impl.PositionProvider;
import org.apache.orc.impl.TreeReaderFactory;
import org.apache.orc.impl.reader.StripePlanner;
import java.io.IOException;
/**
 * Handles batch reading for the struct root type. The handling assumes that the root
 * {@link org.apache.orc.impl.TreeReaderFactory.StructTreeReader} has no nulls. The root struct
 * vector is not represented as part of the final
 * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch}.
*/
public class StructBatchReader extends BatchReader {
// The reader context including row-filtering details
private final TreeReaderFactory.Context context;
private final OrcFilterContextImpl filterContext;
private final TreeReaderFactory.StructTreeReader structReader;
public StructBatchReader(TypeReader rowReader, TreeReaderFactory.Context context) {
super(rowReader);
this.context = context;
this.filterContext = new OrcFilterContextImpl(context.getSchemaEvolution().getReaderSchema(),
context.getSchemaEvolution()
.isSchemaEvolutionCaseAware());
structReader = (TreeReaderFactory.StructTreeReader) rowReader;
}
private void readBatchColumn(VectorizedRowBatch batch,
TypeReader child,
int batchSize,
int index,
TypeReader.ReadPhase readPhase)
throws IOException {
ColumnVector colVector = batch.cols[index];
if (colVector != null) {
if (readPhase.contains(child.getReaderCategory())) {
        // Reset the column vector only if the current column itself is being processed in this
        // phase. If only its children are being processed, the parent must not be reset,
        // e.g. a FILTER_PARENT during the FOLLOWERS read phase.
colVector.reset();
colVector.ensureSize(batchSize, false);
}
child.nextVector(colVector, null, batchSize, batch, readPhase);
}
}
@Override
public void nextBatch(VectorizedRowBatch batch, int batchSize, TypeReader.ReadPhase readPhase)
throws IOException {
if (readPhase == TypeReader.ReadPhase.ALL || readPhase == TypeReader.ReadPhase.LEADERS) {
// selectedInUse = true indicates that the selected vector should be used to determine
// valid rows in the batch
batch.selectedInUse = false;
}
nextBatchForLevel(batch, batchSize, readPhase);
if (readPhase == TypeReader.ReadPhase.ALL || readPhase == TypeReader.ReadPhase.LEADERS) {
// Set the batch size when reading everything or when reading FILTER columns
batch.size = batchSize;
}
if (readPhase == TypeReader.ReadPhase.LEADERS) {
      // Apply the filter callback to reduce the number of rows selected for decoding by the
      // subsequent TreeReaders
if (this.context.getColumnFilterCallback() != null) {
this.context.getColumnFilterCallback().accept(filterContext.setBatch(batch));
}
}
}
private void nextBatchForLevel(
VectorizedRowBatch batch, int batchSize, TypeReader.ReadPhase readPhase)
throws IOException {
TypeReader[] children = structReader.fields;
for (int i = 0; i < children.length && (vectorColumnCount == -1 || i < vectorColumnCount);
++i) {
if (TypeReader.shouldProcessChild(children[i], readPhase)) {
readBatchColumn(batch, children[i], batchSize, i, readPhase);
}
}
}
@Override
public void startStripe(StripePlanner planner, TypeReader.ReadPhase readPhase)
throws IOException {
TypeReader[] children = ((TreeReaderFactory.StructTreeReader) rootType).fields;
for (int i = 0; i < children.length &&
(vectorColumnCount == -1 || i < vectorColumnCount); ++i) {
if (TypeReader.shouldProcessChild(children[i], readPhase)) {
children[i].startStripe(planner, readPhase);
}
}
}
@Override
public void skipRows(long rows, TypeReader.ReadPhase readerCategory) throws IOException {
TypeReader[] children = ((TreeReaderFactory.StructTreeReader) rootType).fields;
for (int i = 0; i < children.length &&
(vectorColumnCount == -1 || i < vectorColumnCount); ++i) {
if (TypeReader.shouldProcessChild(children[i], readerCategory)) {
children[i].skipRows(rows, readerCategory);
}
}
}
@Override
public void seek(PositionProvider[] index, TypeReader.ReadPhase readPhase) throws IOException {
TypeReader[] children = ((TreeReaderFactory.StructTreeReader) rootType).fields;
for (int i = 0; i < children.length &&
(vectorColumnCount == -1 || i < vectorColumnCount); ++i) {
if (TypeReader.shouldProcessChild(children[i], readPhase)) {
children[i].seek(index, readPhase);
}
}
}
}
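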
| 5,854 | 41.122302 | 105 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/reader/tree/TypeReader.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader.tree;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.io.filter.FilterContext;
import org.apache.orc.OrcProto;
import org.apache.orc.impl.PositionProvider;
import org.apache.orc.impl.reader.StripePlanner;
import java.io.IOException;
import java.util.EnumSet;
public interface TypeReader {
void checkEncoding(OrcProto.ColumnEncoding encoding) throws IOException;
void startStripe(StripePlanner planner, ReadPhase readPhase) throws IOException;
void seek(PositionProvider[] index, ReadPhase readPhase) throws IOException;
void seek(PositionProvider index, ReadPhase readPhase) throws IOException;
void skipRows(long rows, ReadPhase readPhase) throws IOException;
void nextVector(ColumnVector previous,
boolean[] isNull,
int batchSize,
FilterContext filterContext,
ReadPhase readPhase) throws IOException;
int getColumnId();
ReaderCategory getReaderCategory();
/**
   * Determines if a child of the parent should be processed for the given read phase. The child
   * is processed if its category is included in the read phase, or if the child is a
   * FILTER_PARENT; the latter allows handling NON_FILTER children nested under a FILTER_PARENT.
   * @param child the child reader that is being evaluated
   * @param readPhase the requested read phase
   * @return true if the child is allowed by the read phase or is a FILTER_PARENT, otherwise false
*/
static boolean shouldProcessChild(TypeReader child, ReadPhase readPhase) {
return readPhase.contains(child.getReaderCategory()) ||
child.getReaderCategory() == ReaderCategory.FILTER_PARENT;
}
enum ReaderCategory {
FILTER_CHILD, // Primitive type that is a filter column
FILTER_PARENT, // Compound type with filter children
NON_FILTER // Non-filter column
}
enum ReadPhase {
// Used to read all columns in the absence of filters
ALL(EnumSet.allOf(ReaderCategory.class)),
// Used to perform read of the filter columns in the presence of filters
LEADERS(EnumSet.of(ReaderCategory.FILTER_PARENT, ReaderCategory.FILTER_CHILD)),
// Used to perform the read of non-filter columns after a match on the filter columns when a
// skip is not needed on the non-filter columns
FOLLOWERS(EnumSet.of(ReaderCategory.NON_FILTER)),
// Used to reposition the FILTER_PARENTs when a forward seek is required within the same row
// group
LEADER_PARENTS(EnumSet.of(ReaderCategory.FILTER_PARENT)),
    // Used to reposition the FILTER_PARENTs and NON_FILTERs; this is required to accurately
    // determine the non-null rows to skip.
FOLLOWERS_AND_PARENTS(EnumSet.of(ReaderCategory.FILTER_PARENT, ReaderCategory.NON_FILTER));
EnumSet<ReaderCategory> categories;
ReadPhase(EnumSet<ReaderCategory> categories) {
this.categories = categories;
}
public boolean contains(ReaderCategory readerCategory) {
return categories.contains(readerCategory);
}
}
}
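// Editor's illustrative sketch (not part of the original source): how the read phases are
// typically sequenced when a row-level filter is configured. This omits the row-group seek
// and skip handling that the real readers also perform.
//
//   batchReader.nextBatch(batch, batchSize, TypeReader.ReadPhase.LEADERS);     // filter columns
//   // the filter callback has now narrowed batch.selected / batch.size
//   if (batch.size > 0) {
//     batchReader.nextBatch(batch, batchSize, TypeReader.ReadPhase.FOLLOWERS); // remaining columns
//   }
//   // without filters, a single pass with ReadPhase.ALL reads every projected column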
| 3,860 | 39.642105 | 100 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/BinaryTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.BinaryColumnStatistics;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
import java.util.function.Consumer;
public class BinaryTreeWriter extends TreeWriterBase {
private final PositionedOutputStream stream;
private final IntegerWriter length;
private boolean isDirectV2 = true;
public BinaryTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.stream = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
this.isDirectV2 = isNewWriteFormat(context);
this.length = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.LENGTH, encryption)),
false, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
return result;
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
BytesColumnVector vec = (BytesColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
for (int i = 0; i < length; ++i) {
stream.write(vec.vector[0], vec.start[0],
vec.length[0]);
this.length.write(vec.length[0]);
}
indexStatistics.updateBinary(vec.vector[0], vec.start[0],
vec.length[0], length);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addBytes(vec.vector[0], vec.start[0], vec.length[0]);
}
bloomFilterUtf8.addBytes(vec.vector[0], vec.start[0], vec.length[0]);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
stream.write(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i]);
this.length.write(vec.length[offset + i]);
indexStatistics.updateBinary(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i], 1);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addBytes(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i]);
}
bloomFilterUtf8.addBytes(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i]);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
stream.getPosition(recorder);
length.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + stream.getBufferSize() +
length.estimateMemory();
}
@Override
public long getRawDataSize() {
// get total length of binary blob
BinaryColumnStatistics bcs = (BinaryColumnStatistics) fileStatistics;
return bcs.getSum();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
stream.flush();
length.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
Consumer<byte[]> updater = CryptoUtils.modifyIvForStripe(stripeId);
stream.changeIv(updater);
length.changeIv(updater);
}
}
| 5,221 | 33.130719 | 79 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/BooleanTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.BitFieldWriter;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class BooleanTreeWriter extends TreeWriterBase {
private final BitFieldWriter writer;
public BooleanTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
PositionedOutputStream out = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
this.writer = new BitFieldWriter(out, 1);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
LongColumnVector vec = (LongColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
int value = vec.vector[0] == 0 ? 0 : 1;
indexStatistics.updateBoolean(value != 0, length);
for (int i = 0; i < length; ++i) {
writer.write(value);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
int value = vec.vector[i + offset] == 0 ? 0 : 1;
writer.write(value);
indexStatistics.updateBoolean(value != 0, 1);
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
writer.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + writer.estimateMemory();
}
@Override
public long getRawDataSize() {
long num = fileStatistics.getNumberOfValues();
return num * JavaDataModel.get().primitive1();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
writer.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
writer.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}
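// [Editor's sketch] BooleanTreeWriter funnels every value through a BitFieldWriter, so booleans
// are packed into bits rather than whole bytes. A minimal JDK-only sketch of bit packing; the
// MSB-first ordering shown here is the editor's assumption about the layout, and the class and
// method names are illustrative, not ORC APIs:
class BooleanBitPackSketch {
  /** Pack booleans into bytes, 8 per byte, most significant bit first. */
  static byte[] pack(boolean[] values) {
    byte[] out = new byte[(values.length + 7) / 8];
    for (int i = 0; i < values.length; i++) {
      if (values[i]) {
        out[i / 8] |= (byte) (0x80 >>> (i % 8));
      }
    }
    return out;
  }
}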
| 3,613 | 31.854545 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/ByteTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.RunLengthByteWriter;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class ByteTreeWriter extends TreeWriterBase {
private final RunLengthByteWriter writer;
public ByteTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.writer = new RunLengthByteWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption)));
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
LongColumnVector vec = (LongColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
byte value = (byte) vec.vector[0];
indexStatistics.updateInteger(value, length);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(value);
}
bloomFilterUtf8.addLong(value);
}
for (int i = 0; i < length; ++i) {
writer.write(value);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
byte value = (byte) vec.vector[i + offset];
writer.write(value);
indexStatistics.updateInteger(value, 1);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(value);
}
bloomFilterUtf8.addLong(value);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
writer.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + writer.estimateMemory();
}
@Override
public long getRawDataSize() {
long num = fileStatistics.getNumberOfValues();
return num * JavaDataModel.get().primitive1();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
writer.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
writer.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}
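// [Editor's sketch] ByteTreeWriter hands each value to a RunLengthByteWriter, which collapses
// runs of identical bytes. The snippet below only sketches the run-detection idea with plain
// JDK types; it is not the actual ORC byte-RLE wire format:
class ByteRunSketch {
  /** Collapse consecutive equal bytes into (runLength, value) pairs. */
  static java.util.List<int[]> runs(byte[] data) {
    java.util.List<int[]> out = new java.util.ArrayList<>();
    int i = 0;
    while (i < data.length) {
      int j = i;
      while (j < data.length && data[j] == data[i]) {
        j++;
      }
      out.add(new int[]{j - i, data[i] & 0xFF});
      i = j;
    }
    return out;
  }
}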
| 3,864 | 31.208333 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/CharTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.Utf8Utils;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
/**
* Under the covers, char is written to ORC the same way as string.
*/
public class CharTreeWriter extends StringBaseTreeWriter {
private final int maxLength;
private final byte[] padding;
CharTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
maxLength = schema.getMaxLength();
    // a UTF-8 character is currently at most 4 bytes long, but the encoding allows up to 6
padding = new byte[6*maxLength];
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
BytesColumnVector vec = (BytesColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
// 0, length times
writePadded(vec, 0, length);
}
} else {
for(int i=0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
// offset + i, once per loop
writePadded(vec, i + offset, 1);
}
}
}
}
private void writePadded(BytesColumnVector vec, int row, int repeats) throws IOException {
final byte[] ptr;
final int ptrOffset;
final int ptrLength;
int charLength = Utf8Utils.charLength(vec.vector[row], vec.start[row], vec.length[row]);
if (charLength >= maxLength) {
ptr = vec.vector[row];
ptrOffset = vec.start[row];
ptrLength =
Utf8Utils
.truncateBytesTo(maxLength, vec.vector[row], vec.start[row], vec.length[row]);
} else {
ptr = padding;
// the padding is exactly 1 byte per char
ptrLength = vec.length[row] + (maxLength - charLength);
ptrOffset = 0;
System.arraycopy(vec.vector[row], vec.start[row], ptr, 0, vec.length[row]);
Arrays.fill(ptr, vec.length[row], ptrLength, (byte) ' ');
}
if (useDictionaryEncoding) {
int id = dictionary.add(ptr, ptrOffset, ptrLength);
for (int i = 0; i < repeats; ++i) {
rows.add(id);
}
} else {
for (int i = 0; i < repeats; ++i) {
directStreamOutput.write(ptr, ptrOffset, ptrLength);
lengthOutput.write(ptrLength);
}
}
indexStatistics.updateString(ptr, ptrOffset, ptrLength, repeats);
if (createBloomFilter) {
if (bloomFilter != null) {
        // decode the UTF-8 bytes back into a Java String for the original bloom filter
bloomFilter.addString(new String(ptr, ptrOffset, ptrLength, StandardCharsets.UTF_8));
}
bloomFilterUtf8.addBytes(ptr, ptrOffset, ptrLength);
}
}
}
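// [Editor's sketch] writePadded() above trims or space-pads each value to the schema's
// maxLength measured in characters (code points), not bytes. The same idea on Java Strings,
// using only the JDK; the names are illustrative, not ORC APIs:
class CharPaddingSketch {
  /** Truncate to n code points, or right-pad with spaces up to n code points. */
  static String padToCodePoints(String s, int n) {
    int count = s.codePointCount(0, s.length());
    if (count >= n) {
      return s.substring(0, s.offsetByCodePoints(0, n));
    }
    StringBuilder sb = new StringBuilder(s);
    for (int i = count; i < n; i++) {
      sb.append(' ');
    }
    return sb.toString();
  }
}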
| 3,772 | 34.59434 | 93 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/DateTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class DateTreeWriter extends TreeWriterBase {
private final IntegerWriter writer;
private final boolean isDirectV2;
private final boolean useProleptic;
public DateTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
OutStream out = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
this.isDirectV2 = isNewWriteFormat(context);
this.writer = createIntegerWriter(out, true, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
useProleptic = context.getProlepticGregorian();
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
LongColumnVector vec = (LongColumnVector) vector;
if (vector instanceof DateColumnVector) {
((DateColumnVector) vec).changeCalendar(useProleptic, true);
} else if (useProleptic) {
throw new IllegalArgumentException("Can't use LongColumnVector to write" +
" proleptic dates");
}
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
int value = (int) vec.vector[0];
indexStatistics.updateDate(value);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(value);
}
bloomFilterUtf8.addLong(value);
}
for (int i = 0; i < length; ++i) {
writer.write(value);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
int value = (int) vec.vector[i + offset];
writer.write(value);
indexStatistics.updateDate(value);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(value);
}
bloomFilterUtf8.addLong(value);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
writer.getPosition(recorder);
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
return result;
}
@Override
public long estimateMemory() {
return super.estimateMemory() + writer.estimateMemory();
}
@Override
public long getRawDataSize() {
return fileStatistics.getNumberOfValues() *
JavaDataModel.get().lengthOfDate();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
writer.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
writer.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}
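// [Editor's sketch] The DATE stream above carries each date as an integer day offset from the
// Unix epoch (1970-01-01), and the useProleptic flag controls which calendar those offsets are
// interpreted in. java.time.LocalDate is proleptic Gregorian, so the conversion is a one-liner;
// the names below are illustrative, not ORC APIs:
class DateEpochSketch {
  static int toEpochDays(java.time.LocalDate date) {
    return (int) date.toEpochDay();
  }
  static java.time.LocalDate fromEpochDays(int days) {
    return java.time.LocalDate.ofEpochDay(days);
  }
}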
| 4,757 | 32.041667 | 80 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/Decimal64TreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.RunLengthIntegerWriterV2;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
/**
* Writer for short decimals in ORCv2.
*/
public class Decimal64TreeWriter extends TreeWriterBase {
private final RunLengthIntegerWriterV2 valueWriter;
private final int scale;
public Decimal64TreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
OutStream stream = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
// Use RLEv2 until we have the new RLEv3.
valueWriter = new RunLengthIntegerWriterV2(stream, true, true);
scale = schema.getScale();
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
private void writeBatch(DecimalColumnVector vector, int offset,
int length) throws IOException {
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
HiveDecimalWritable value = vector.vector[0];
long lg = value.serialize64(scale);
indexStatistics.updateDecimal64(lg, scale);
if (createBloomFilter) {
bloomFilterUtf8.addLong(lg);
}
for (int i = 0; i < length; ++i) {
valueWriter.write(lg);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vector.noNulls || !vector.isNull[i + offset]) {
HiveDecimalWritable value = vector.vector[i + offset];
long lg = value.serialize64(scale);
valueWriter.write(lg);
indexStatistics.updateDecimal64(lg, scale);
if (createBloomFilter) {
bloomFilterUtf8.addLong(lg);
}
}
}
}
}
private void writeBatch(Decimal64ColumnVector vector, int offset,
int length) throws IOException {
assert(scale == vector.scale);
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
long lg = vector.vector[0];
indexStatistics.updateDecimal64(lg, scale);
if (createBloomFilter) {
bloomFilterUtf8.addLong(lg);
}
for (int i = 0; i < length; ++i) {
valueWriter.write(lg);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vector.noNulls || !vector.isNull[i + offset]) {
long lg = vector.vector[i + offset];
valueWriter.write(lg);
indexStatistics.updateDecimal64(lg, scale);
if (createBloomFilter) {
bloomFilterUtf8.addLong(lg);
}
}
}
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
if (vector instanceof Decimal64ColumnVector) {
writeBatch((Decimal64ColumnVector) vector, offset, length);
} else {
writeBatch((DecimalColumnVector) vector, offset, length);
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
valueWriter.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + valueWriter.estimateMemory();
}
@Override
public long getRawDataSize() {
return fileStatistics.getNumberOfValues() * JavaDataModel.get().primitive2();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
valueWriter.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
valueWriter.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}
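// [Editor's sketch] serialize64(scale) above turns a decimal into its unscaled long at the
// column's fixed scale, which is what the RLEv2 value stream stores. An equivalent computation
// with java.math.BigDecimal, purely for illustration (not an ORC API):
class Decimal64Sketch {
  /** Unscaled value of d at the given scale; throws if rounding would be required. */
  static long toUnscaledLong(java.math.BigDecimal d, int scale) {
    return d.setScale(scale).unscaledValue().longValueExact();
  }
}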
| 5,346 | 32.21118 | 81 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/DecimalTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.SerializationUtils;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
import java.util.function.Consumer;
public class DecimalTreeWriter extends TreeWriterBase {
private final PositionedOutputStream valueStream;
private final SerializationUtils utils = new SerializationUtils();
// These scratch buffers allow us to serialize decimals much faster.
private final long[] scratchLongs;
private final byte[] scratchBuffer;
private final IntegerWriter scaleStream;
private final boolean isDirectV2;
public DecimalTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.isDirectV2 = isNewWriteFormat(context);
valueStream = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
scratchLongs = new long[HiveDecimal.SCRATCH_LONGS_LEN];
scratchBuffer = new byte[HiveDecimal.SCRATCH_BUFFER_LEN_TO_BYTES];
this.scaleStream = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.SECONDARY, encryption)),
true, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
return result;
}
private void writeBatch(DecimalColumnVector vector, int offset,
int length) throws IOException {
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
HiveDecimalWritable value = vector.vector[0];
indexStatistics.updateDecimal(value);
if (createBloomFilter) {
String str = value.toString(scratchBuffer);
if (bloomFilter != null) {
bloomFilter.addString(str);
}
bloomFilterUtf8.addString(str);
}
for (int i = 0; i < length; ++i) {
value.serializationUtilsWrite(valueStream,
scratchLongs);
scaleStream.write(value.scale());
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vector.noNulls || !vector.isNull[i + offset]) {
HiveDecimalWritable value = vector.vector[i + offset];
value.serializationUtilsWrite(valueStream, scratchLongs);
scaleStream.write(value.scale());
indexStatistics.updateDecimal(value);
if (createBloomFilter) {
String str = value.toString(scratchBuffer);
if (bloomFilter != null) {
bloomFilter.addString(str);
}
bloomFilterUtf8.addString(str);
}
}
}
}
}
private void writeBatch(Decimal64ColumnVector vector, int offset,
int length) throws IOException {
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
indexStatistics.updateDecimal64(vector.vector[0], vector.scale);
if (createBloomFilter) {
HiveDecimalWritable value = vector.getScratchWritable();
value.setFromLongAndScale(vector.vector[0], vector.scale);
String str = value.toString(scratchBuffer);
if (bloomFilter != null) {
bloomFilter.addString(str);
}
bloomFilterUtf8.addString(str);
}
for (int i = 0; i < length; ++i) {
utils.writeVslong(valueStream, vector.vector[0]);
scaleStream.write(vector.scale);
}
}
} else {
HiveDecimalWritable value = vector.getScratchWritable();
for (int i = 0; i < length; ++i) {
if (vector.noNulls || !vector.isNull[i + offset]) {
long num = vector.vector[i + offset];
utils.writeVslong(valueStream, num);
scaleStream.write(vector.scale);
indexStatistics.updateDecimal64(num, vector.scale);
if (createBloomFilter) {
value.setFromLongAndScale(num, vector.scale);
String str = value.toString(scratchBuffer);
if (bloomFilter != null) {
bloomFilter.addString(str);
}
bloomFilterUtf8.addString(str);
}
}
}
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
if (vector instanceof Decimal64ColumnVector) {
writeBatch((Decimal64ColumnVector) vector, offset, length);
} else {
writeBatch((DecimalColumnVector) vector, offset, length);
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
valueStream.getPosition(recorder);
scaleStream.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + valueStream.getBufferSize() +
scaleStream.estimateMemory();
}
@Override
public long getRawDataSize() {
return fileStatistics.getNumberOfValues() *
JavaDataModel.get().lengthOfDecimal();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
valueStream.flush();
scaleStream.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
Consumer<byte[]> updater = CryptoUtils.modifyIvForStripe(stripeId);
valueStream.changeIv(updater);
scaleStream.changeIv(updater);
}
}
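// [Editor's sketch] In the Decimal64ColumnVector path above, utils.writeVslong() stores each
// unscaled value as a signed variable-length integer; to the editor's recollection that encoding
// zigzag-maps the value before emitting base-128 bytes, so small magnitudes stay short. The
// zigzag mapping itself, as a standalone sketch (names are illustrative, not ORC APIs):
class ZigZagSketch {
  /** 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ... */
  static long encode(long v) {
    return (v << 1) ^ (v >> 63);
  }
  static long decode(long z) {
    return (z >>> 1) ^ -(z & 1);
  }
}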
| 7,419 | 34.502392 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/DoubleTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.SerializationUtils;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class DoubleTreeWriter extends TreeWriterBase {
private final PositionedOutputStream stream;
private final SerializationUtils utils;
public DoubleTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.stream = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
this.utils = new SerializationUtils();
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
DoubleColumnVector vec = (DoubleColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
double value = vec.vector[0];
indexStatistics.updateDouble(value);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addDouble(value);
}
bloomFilterUtf8.addDouble(value);
}
for (int i = 0; i < length; ++i) {
utils.writeDouble(stream, value);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
double value = vec.vector[i + offset];
utils.writeDouble(stream, value);
indexStatistics.updateDouble(value);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addDouble(value);
}
bloomFilterUtf8.addDouble(value);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
stream.flush();
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
stream.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + stream.getBufferSize();
}
@Override
public long getRawDataSize() {
long num = fileStatistics.getNumberOfValues();
return num * JavaDataModel.get().primitive2();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
stream.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
stream.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}
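// [Editor's sketch] utils.writeDouble() above emits the raw IEEE-754 bits of each value
// (little-endian, to the editor's knowledge). A JDK-only equivalent of that byte layout;
// the class name is illustrative, not an ORC API:
class DoubleBitsSketch {
  static byte[] toLittleEndianBytes(double value) {
    return java.nio.ByteBuffer.allocate(Double.BYTES)
        .order(java.nio.ByteOrder.LITTLE_ENDIAN)
        .putDouble(value)
        .array();
  }
}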
| 4,021 | 31.435484 | 75 |
java
|
null |
orc-main/java/core/src/java/org/apache/orc/impl/writer/EncryptionTreeWriter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.DataMask;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.TypeUtils;
import java.io.IOException;
/**
* TreeWriter that handles column encryption.
 * We create a TreeWriter for each of the alternatives with a WriterContext
* that creates encrypted streams.
*/
public class EncryptionTreeWriter implements TreeWriter {
// the different writers
private final TreeWriter[] childrenWriters;
private final DataMask[] masks;
// a column vector that we use to apply the masks
private final ColumnVector scratch;
private final VectorizedRowBatch scratchBatch;
EncryptionTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
scratch = TypeUtils.createColumn(schema, TypeDescription.RowBatchVersion.USE_DECIMAL64, 1024);
childrenWriters = new TreeWriterBase[2];
masks = new DataMask[childrenWriters.length];
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
scratchBatch = new VectorizedRowBatch(schema.getChildren().size(), 1024);
} else {
scratchBatch = new VectorizedRowBatch(1, 1024);
}
    // alternative 0: the real data, written to encrypted streams
masks[0] = null;
childrenWriters[0] = Factory.createSubtree(schema, encryption, context);
    // alternative 1: masked data, written to unencrypted streams
masks[1] = context.getUnencryptedMask(schema.getId());
childrenWriters[1] = Factory.createSubtree(schema, null, context);
}
@Override
public void writeRootBatch(VectorizedRowBatch batch, int offset,
int length) throws IOException {
scratchBatch.ensureSize(offset + length);
for(int alt=0; alt < childrenWriters.length; ++alt) {
// if there is a mask, apply it to each column
if (masks[alt] != null) {
for(int col=0; col < scratchBatch.cols.length; ++col) {
masks[alt].maskData(batch.cols[col], scratchBatch.cols[col], offset,
length);
}
childrenWriters[alt].writeRootBatch(scratchBatch, offset, length);
} else {
childrenWriters[alt].writeRootBatch(batch, offset, length);
}
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
scratch.ensureSize(length, false);
for(int alt=0; alt < childrenWriters.length; ++alt) {
// if there is a mask, apply it to each column
if (masks[alt] != null) {
masks[alt].maskData(vector, scratch, offset, length);
childrenWriters[alt].writeBatch(scratch, offset, length);
} else {
childrenWriters[alt].writeBatch(vector, offset, length);
}
}
}
@Override
public void createRowIndexEntry() throws IOException {
for(TreeWriter child: childrenWriters) {
child.createRowIndexEntry();
}
}
@Override
public void flushStreams() throws IOException {
for(TreeWriter child: childrenWriters) {
child.flushStreams();
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
for(TreeWriter child: childrenWriters) {
child.writeStripe(requiredIndexEntries);
}
}
@Override
public void addStripeStatistics(StripeStatistics[] stripeStatistics
) throws IOException {
for(TreeWriter child: childrenWriters) {
child.addStripeStatistics(stripeStatistics);
}
}
@Override
public long estimateMemory() {
long result = 0;
for (TreeWriter writer : childrenWriters) {
result += writer.estimateMemory();
}
return result;
}
@Override
public long getRawDataSize() {
// return the size of the encrypted data
return childrenWriters[0].getRawDataSize();
}
@Override
public void prepareStripe(int stripeId) {
for (TreeWriter writer : childrenWriters) {
writer.prepareStripe(stripeId);
}
}
@Override
public void writeFileStatistics() throws IOException {
for (TreeWriter child : childrenWriters) {
child.writeFileStatistics();
}
}
@Override
public void getCurrentStatistics(ColumnStatistics[] output) {
childrenWriters[0].getCurrentStatistics(output);
}
}
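// [Editor's sketch] EncryptionTreeWriter fans each batch out to two children: variant 0 gets the
// real data (written to encrypted streams), variant 1 gets a masked copy written in the clear.
// A stripped-down illustration of that fan-out pattern; Mask, Sink and FanOutSketch are
// hypothetical stand-ins, not ORC types:
class FanOutSketch {
  interface Mask { long[] apply(long[] values); }
  interface Sink { void write(long[] values); }

  private final Mask[] masks;   // a null entry means "write the values unchanged"
  private final Sink[] sinks;

  FanOutSketch(Mask[] masks, Sink[] sinks) {
    this.masks = masks;
    this.sinks = sinks;
  }

  void write(long[] values) {
    for (int alt = 0; alt < sinks.length; ++alt) {
      long[] toWrite = masks[alt] == null ? values : masks[alt].apply(values);
      sinks[alt].write(toWrite);
    }
  }
}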
| 5,275 | 31.770186 | 98 |
java
|