index (int64, 0–0) | repo_id (string, 26–205 chars) | file_path (string, 51–246 chars) | content (string, 8–433k chars) | __index_level_0__ (int64, 0–10k) |
---|---|---|---|---|
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/HollowEffigy.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import java.util.ArrayList;
import java.util.List;
/**
* The HollowEffigy is an Object-based representation of a Hollow record;
* it is used when creating the diff HTML.
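* <p>
* A minimal construction sketch (the type and field names below are hypothetical,
* chosen only for illustration; they are not taken from this codebase):
* <pre>{@code
* HollowEffigy effigy = new HollowEffigy("Movie");
* effigy.add(new HollowEffigy.Field("title", "String", "The Matrix"));
* effigy.add(new HollowEffigy.Field("releaseYear", "Int", 1999));
* }</pre>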
*/
public class HollowEffigy {
private final HollowEffigyFactory factory;
private final String objectType;
final HollowTypeDataAccess dataAccess;
final int ordinal;
private List<Field> fields;
public HollowEffigy(String objectType) {
this.factory = null;
this.objectType = objectType;
this.dataAccess = null;
this.ordinal = -1;
this.fields = new ArrayList<Field>();
}
public HollowEffigy(HollowEffigyFactory factory, HollowTypeDataAccess dataAccess, int ordinal) {
this.factory = factory;
this.objectType = null;
this.dataAccess = dataAccess;
this.ordinal = ordinal;
}
public void add(HollowEffigy.Field field) {
fields.add(field);
}
public String getObjectType() {
if(objectType != null)
return objectType;
return dataAccess.getSchema().getName();
}
public HollowTypeDataAccess getDataAccess() {
return dataAccess;
}
public int getOrdinal() {
return ordinal;
}
public List<Field> getFields() {
if(fields == null)
fields = factory.createFields(this);
return fields;
}
public static class Field {
private final String fieldName;
private final String typeName;
private final Object value;
private final int hashCode;
public Field(String fieldName, HollowEffigy value) {
this(fieldName, value.getObjectType(), value);
}
public Field(String fieldName, String typeName, Object value) {
this.fieldName = fieldName;
this.typeName = typeName;
this.value = value;
this.hashCode = 31 * (31 * (fieldName == null ? 0 : fieldName.hashCode()) + typeName.hashCode()) + (value == null ? 0 : value.hashCode());
}
public String getTypeName() {
return typeName;
}
public String getFieldName() {
return fieldName;
}
public Object getValue() {
return value;
}
public boolean isLeafNode() {
return !(value instanceof HollowEffigy);
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object other) {
if(this == other)
return true;
if(other instanceof Field) {
if(this.fieldName.equals(((Field) other).fieldName) && this.typeName.equals(((Field) other).typeName)) {
if(this.value == null)
return ((Field) other).value == null;
return this.value.equals(((Field) other).value);
}
}
return false;
}
}
public static enum CollectionType {
NONE,
MAP,
COLLECTION
}
@Override
public int hashCode() {
int hashcode = 31 + getFields().hashCode();
return hashcode;
}
@Override
public boolean equals(Object other) {
if(this == other)
return true;
if(other instanceof HollowEffigy) {
HollowEffigy otherEffigy = (HollowEffigy) other;
return this.getFields().equals(otherEffigy.getFields());
}
return false;
}
}
| 9,500 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/HollowEffigyDiffRecord.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer;
import com.netflix.hollow.diffview.effigy.HollowEffigy;
import com.netflix.hollow.diffview.effigy.HollowEffigy.Field;
import java.util.HashMap;
import java.util.Map;
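/**
* Tracks, for a single "from" (basedOn) effigy, a multiset of its leaf fields. calculateDiff then
* walks a candidate "to" effigy and counts how many leaf fields match that multiset (similarities)
* versus how many do not (differences), short-circuiting once maxDiff is reached. The runId counter
* lets the same record be reused across many comparisons without rebuilding the map.
*/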
public class HollowEffigyDiffRecord {
private final Map<HollowEffigy.Field, FieldDiffCount> map;
private final SimilarityDifferenceCounter simDiffCount = new SimilarityDifferenceCounter();
private int totalOriginalFieldCount;
private int runId;
public HollowEffigyDiffRecord(HollowEffigy basedOn) {
this.map = new HashMap<HollowEffigy.Field, FieldDiffCount>();
traverseOriginalFields(basedOn);
}
private void traverseOriginalFields(HollowEffigy effigy) {
for(Field field : effigy.getFields()) {
if(field.isLeafNode()) {
FieldDiffCount fieldCount = map.get(field);
if(fieldCount == null) {
fieldCount = new FieldDiffCount();
map.put(field, fieldCount);
}
fieldCount.incrementOriginalCount();
totalOriginalFieldCount++;
} else {
traverseOriginalFields((HollowEffigy) field.getValue());
}
}
}
public int calculateDiff(HollowEffigy comparison, int maxDiff) {
runId++;
simDiffCount.reset();
traverseComparisonFields(comparison, maxDiff);
if(simDiffCount.diffCount >= maxDiff)
return HollowEffigyCollectionPairer.MAX_MATRIX_ELEMENT_FIELD_VALUE;
return score();
}
public void traverseComparisonFields(HollowEffigy comparison, int maxDiff) {
for(Field field : comparison.getFields()) {
if(field.isLeafNode()) {
FieldDiffCount fieldCount = map.get(field);
if(fieldCount == null) {
if(simDiffCount.diffCount+1 >= maxDiff) {
simDiffCount.diffCount++;
return;
}
fieldCount = new FieldDiffCount();
map.put(field, fieldCount);
}
if(fieldCount.incrementComparisonCount(runId)) {
if(++simDiffCount.diffCount >= maxDiff)
return;
} else {
simDiffCount.simCount++;
}
} else {
traverseComparisonFields((HollowEffigy) field.getValue(), maxDiff);
if(simDiffCount.diffCount >= maxDiff)
return;
}
}
}
private int score() {
int totalDiff = (totalOriginalFieldCount - simDiffCount.simCount) + simDiffCount.diffCount;
if(simDiffCount.simCount == 0 && totalDiff != 0)
return HollowEffigyCollectionPairer.MAX_MATRIX_ELEMENT_FIELD_VALUE;
return totalDiff;
}
private class FieldDiffCount {
private int originalCount;
private int comparisonCount;
private int lastComparisonUpdatedRunId;
private void incrementOriginalCount() {
originalCount++;
}
private boolean incrementComparisonCount(int runId) {
clearComparisonIfRunChanged(runId);
return ++comparisonCount > originalCount;
}
private void clearComparisonIfRunChanged(int runId) {
if(runId != lastComparisonUpdatedRunId) {
comparisonCount = 0;
lastComparisonUpdatedRunId = runId;
}
}
}
private static class SimilarityDifferenceCounter {
private int simCount;
private int diffCount;
public void reset() {
simCount = 0;
diffCount = 0;
}
}
}
| 9,501 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/HollowEffigyCollectionPairer.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer;
import com.netflix.hollow.core.index.key.PrimaryKey;
import com.netflix.hollow.core.memory.encoding.HashCodes;
import com.netflix.hollow.core.read.HollowReadFieldUtils;
import com.netflix.hollow.core.read.dataaccess.HollowObjectTypeDataAccess;
import com.netflix.hollow.core.schema.HollowObjectSchema;
import com.netflix.hollow.diffview.effigy.HollowEffigy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.List;
public class HollowEffigyCollectionPairer extends HollowEffigyFieldPairer {
static final int MAX_MATRIX_ELEMENT_FIELD_VALUE = 0x1FFFFF;
private final PrimaryKey matchHint;
public HollowEffigyCollectionPairer(HollowEffigy fromCollection, HollowEffigy toCollection, PrimaryKey matchHint) {
super(fromCollection, toCollection);
this.matchHint = matchHint;
}
@Override
public List<EffigyFieldPair> pair() {
if(matchHint != null)
return pairByMatchHint();
return pairByMinDifference();
}
private List<EffigyFieldPair> pairByMatchHint() {
List<EffigyFieldPair> fieldPairs = new ArrayList<EffigyFieldPair>();
if(from.getFields().size() == 0) {
for(int i=0;i<to.getFields().size();i++)
fieldPairs.add(new EffigyFieldPair(null, to.getFields().get(i), -1, i));
return fieldPairs;
} else if(to.getFields().size() == 0) {
for(int i=0;i<from.getFields().size();i++)
fieldPairs.add(new EffigyFieldPair(from.getFields().get(i), null, i, -1));
return fieldPairs;
}
int toFieldPathIndexes[][] = new int[matchHint.numFields()][];
int fromFieldPathIndexes[][] = new int[matchHint.numFields()][];
for(int i=0;i<matchHint.numFields();i++) {
toFieldPathIndexes[i] = matchHint.getFieldPathIndex(to.getDataAccess().getDataAccess(), i);
fromFieldPathIndexes[i] = matchHint.getFieldPathIndex(from.getDataAccess().getDataAccess(), i);
}
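// Build an open-addressed hash table (linear probing) mapping the match-hint hash of each "to"
// element to its index; it is probed below with each "from" element's hash to find candidate matches.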
int hashedToFieldIndexes[] = new int[HashCodes.hashTableSize(to.getFields().size())];
Arrays.fill(hashedToFieldIndexes, -1);
for(int i=0;i<to.getFields().size();i++) {
HollowEffigy comparisonEffigy = getComparisonEffigy((HollowEffigy) to.getFields().get(i).getValue());
int hash = hashCode(comparisonEffigy, toFieldPathIndexes);
hash &= hashedToFieldIndexes.length-1;
while(hashedToFieldIndexes[hash] != -1) {
hash++;
hash &= hashedToFieldIndexes.length - 1;
}
hashedToFieldIndexes[hash] = i;
}
BitSet matchedToElements = new BitSet(to.getFields().size());
BitSet matchedFromElements = new BitSet(from.getFields().size());
for(int i=0;i<from.getFields().size();i++) {
HollowEffigy fromEffigy = getComparisonEffigy((HollowEffigy) from.getFields().get(i).getValue());
int hash = hashCode(fromEffigy, fromFieldPathIndexes);
hash &= hashedToFieldIndexes.length-1;
while(hashedToFieldIndexes[hash] != -1) {
int toIdx = hashedToFieldIndexes[hash];
if(!matchedToElements.get(toIdx)) {
HollowEffigy toEffigy = getComparisonEffigy((HollowEffigy) to.getFields().get(toIdx).getValue());
if(recordsMatch(fromEffigy, toEffigy, fromFieldPathIndexes, toFieldPathIndexes)) {
fieldPairs.add(new EffigyFieldPair(from.getFields().get(i), to.getFields().get(toIdx), i, toIdx));
matchedFromElements.set(i);
matchedToElements.set(toIdx);
}
}
hash++;
hash &= hashedToFieldIndexes.length - 1;
}
}
addUnmatchedElements(fieldPairs, matchedFromElements, matchedToElements);
return fieldPairs;
}
private boolean recordsMatch(HollowEffigy fromElement, HollowEffigy toElement, int[][] fromFieldPathIndexes, int[][] toFieldPathIndexes) {
for(int i=0;i<fromFieldPathIndexes.length;i++) {
if(!fieldsAreEqual(fromElement, toElement, fromFieldPathIndexes[i], toFieldPathIndexes[i]))
return false;
}
return true;
}
private boolean fieldsAreEqual(HollowEffigy fromElement, HollowEffigy toElement, int[] fromFieldPath, int[] toFieldPath) {
HollowObjectTypeDataAccess fromDataAccess = (HollowObjectTypeDataAccess) fromElement.getDataAccess();
int fromOrdinal = fromElement.getOrdinal();
HollowObjectTypeDataAccess toDataAccess = (HollowObjectTypeDataAccess) toElement.getDataAccess();
int toOrdinal = toElement.getOrdinal();
HollowObjectSchema fromSchema = fromDataAccess.getSchema();
HollowObjectSchema toSchema = toDataAccess.getSchema();
for (int i = 0; i < fromFieldPath.length - 1; i++) {
int fromFieldPosition = fromFieldPath[i];
int toFieldPosition = toFieldPath[i];
fromOrdinal = fromDataAccess.readOrdinal(fromOrdinal, fromFieldPosition);
toOrdinal = toDataAccess.readOrdinal(toOrdinal, toFieldPosition);
fromDataAccess = (HollowObjectTypeDataAccess) fromDataAccess.getDataAccess().getTypeDataAccess(fromSchema.getReferencedType(fromFieldPosition));
toDataAccess = (HollowObjectTypeDataAccess) toDataAccess.getDataAccess().getTypeDataAccess(toSchema.getReferencedType(toFieldPosition));
fromSchema = fromDataAccess.getSchema();
toSchema = toDataAccess.getSchema();
}
return HollowReadFieldUtils.fieldsAreEqual(fromDataAccess, fromOrdinal, fromFieldPath[fromFieldPath.length-1], toDataAccess, toOrdinal, toFieldPath[toFieldPath.length-1]);
}
private int hashCode(HollowEffigy element, int[][] fieldPathIndexes) {
int hash = 0;
for(int i=0;i<fieldPathIndexes.length;i++) {
hash = hash * 31;
hash ^= fieldHashCode(element, fieldPathIndexes[i]);
}
return hash;
}
private int fieldHashCode(HollowEffigy element, int[] fieldPath) {
HollowObjectTypeDataAccess dataAccess = (HollowObjectTypeDataAccess) element.getDataAccess();
HollowObjectSchema schema = dataAccess.getSchema();
int ordinal = element.getOrdinal();
for (int i = 0; i < fieldPath.length - 1; i++) {
int fieldPosition = fieldPath[i];
ordinal = dataAccess.readOrdinal(ordinal, fieldPosition);
dataAccess = (HollowObjectTypeDataAccess) dataAccess.getDataAccess().getTypeDataAccess(schema.getReferencedType(fieldPosition));
schema = dataAccess.getSchema();
}
int fieldHash = HollowReadFieldUtils.fieldHashCode(dataAccess, ordinal, fieldPath[fieldPath.length-1]);
return HashCodes.hashInt(fieldHash);
}
/**
* Finds the element pairings which have the minimum number of differences between them.
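* <p>
* Diff scores are computed in rounds with an increasing maxDiff budget (1, 2, 4, 8, unbounded), so
* cheap, obviously-similar pairings are locked in before more expensive comparisons are attempted;
* each round packs (score, fromIndex, toIndex) into a long, sorts, and greedily pairs the lowest scores.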
*/
private List<EffigyFieldPair> pairByMinDifference() {
List<EffigyFieldPair> fieldPairs = new ArrayList<EffigyFieldPair>();
BitSet pairedFromIndices = new BitSet(from.getFields().size());
BitSet pairedToIndices = new BitSet(to.getFields().size());
int maxDiffBackoff[] = new int[] {1, 2, 4, 8, Integer.MAX_VALUE};
int maxPairs = Math.min(from.getFields().size(), to.getFields().size());
for(int i=0;i<maxDiffBackoff.length && fieldPairs.size() < maxPairs;i++) {
long diffMatrixElements[] = pair(pairedFromIndices, pairedToIndices, maxDiffBackoff[i]);
Arrays.sort(diffMatrixElements);
for(long matrixElement : diffMatrixElements) {
if(fieldPairs.size() == maxPairs)
break;
int diffScore = getDiffScore(matrixElement);
if(diffScore == MAX_MATRIX_ELEMENT_FIELD_VALUE)
break;
int fromIndex = getFromIndex(matrixElement);
int toIndex = getToIndex(matrixElement);
if(pairedFromIndices.get(fromIndex))
continue;
if(pairedToIndices.get(toIndex))
continue;
fieldPairs.add(new EffigyFieldPair(from.getFields().get(fromIndex), to.getFields().get(toIndex), fromIndex, toIndex));
pairedFromIndices.set(fromIndex);
pairedToIndices.set(toIndex);
}
}
addUnmatchedElements(fieldPairs, pairedFromIndices, pairedToIndices);
return fieldPairs;
}
private void addUnmatchedElements(List<EffigyFieldPair> fieldPairs, BitSet pairedFromIndices, BitSet pairedToIndices) {
for(int i=0;i<from.getFields().size();i++) {
if(!pairedFromIndices.get(i))
fieldPairs.add(new EffigyFieldPair(from.getFields().get(i), null, i, -1));
}
for(int i=0;i<to.getFields().size();i++) {
if(!pairedToIndices.get(i))
fieldPairs.add(new EffigyFieldPair(null, to.getFields().get(i), -1, i));
}
}
public long[] pair(final BitSet pairedFromIndices, final BitSet pairedToIndices, final int maxDiff) {
final long diffMatrixElements[] = new long[from.getFields().size() * to.getFields().size()];
int matrixElementIdx = 0;
for(int i=0;i<from.getFields().size();i++) {
final int fromIdx = i;
if(pairedFromIndices.get(fromIdx)) {
for(int j=0;j<to.getFields().size();j++) {
diffMatrixElements[matrixElementIdx++] = getDiffMatrixElement(fromIdx, j, MAX_MATRIX_ELEMENT_FIELD_VALUE);
}
} else {
HollowEffigy fromElement = getComparisonEffigy((HollowEffigy) from.getFields().get(fromIdx).getValue());
HollowEffigyDiffRecord diffRecord = new HollowEffigyDiffRecord(fromElement);
for(int j=0;j<to.getFields().size();j++) {
if(pairedToIndices.get(j)) {
diffMatrixElements[matrixElementIdx++] = getDiffMatrixElement(fromIdx, j, MAX_MATRIX_ELEMENT_FIELD_VALUE);
} else {
HollowEffigy toElement = getComparisonEffigy((HollowEffigy) to.getFields().get(j).getValue());
int diffScore = diffRecord.calculateDiff(toElement, maxDiff);
diffMatrixElements[matrixElementIdx++] = getDiffMatrixElement(fromIdx, j, diffScore);
}
}
}
}
return diffMatrixElements;
}
protected HollowEffigy getComparisonEffigy(HollowEffigy effigy) {
return effigy;
}
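// Diff matrix elements are packed longs: bits 42+ hold the diff score, bits 21-41 the "from" index
// and bits 0-20 the "to" index, each capped at MAX_MATRIX_ELEMENT_FIELD_VALUE (0x1FFFFF, 21 bits).
// Sorting the longs therefore orders pairs by score first. For example, a score of 3 for the pair
// (fromIndex=5, toIndex=7) packs to (3L << 42) | (5L << 21) | 7.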
private long getDiffMatrixElement(int fromIndex, int toIndex, int diffScore) {
return ((long)diffScore << 42) | ((long)fromIndex << 21) | ((long)toIndex);
}
private int getDiffScore(long diffMatrixElement) {
return (int)((diffMatrixElement >> 42) & MAX_MATRIX_ELEMENT_FIELD_VALUE);
}
private int getFromIndex(long diffMatrixElement) {
return (int)((diffMatrixElement >> 21) & MAX_MATRIX_ELEMENT_FIELD_VALUE);
}
private int getToIndex(long diffMatrixElement) {
return (int)(diffMatrixElement & MAX_MATRIX_ELEMENT_FIELD_VALUE);
}
}
| 9,502 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/HollowEffigyFieldPairer.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer;
import com.netflix.hollow.core.index.key.PrimaryKey;
import com.netflix.hollow.core.schema.HollowCollectionSchema;
import com.netflix.hollow.core.schema.HollowMapSchema;
import com.netflix.hollow.core.schema.HollowSchema;
import com.netflix.hollow.diffview.effigy.HollowEffigy;
import com.netflix.hollow.diffview.effigy.HollowEffigy.Field;
import java.util.List;
import java.util.Map;
public abstract class HollowEffigyFieldPairer {
protected final HollowEffigy from;
protected final HollowEffigy to;
public HollowEffigyFieldPairer(HollowEffigy from, HollowEffigy to) {
this.from = from;
this.to = to;
}
public abstract List<EffigyFieldPair> pair();
public static class EffigyFieldPair {
private final HollowEffigy.Field from;
private final HollowEffigy.Field to;
private final int fromIdx;
private final int toIdx;
private final boolean isDiff;
public EffigyFieldPair(Field from, Field to, int fromIdx, int toIdx) {
this.from = from;
this.to = to;
this.fromIdx = fromIdx;
this.toIdx = toIdx;
this.isDiff = calculateIsDiff();
}
private boolean calculateIsDiff() {
if((from == null && to != null) || (from != null && to == null))
return true;
if(from.getValue() == null)
return to.getValue() != null;
if(isLeafNode())
return !from.getValue().equals(to.getValue());
return false;
}
public HollowEffigy.Field getFrom() {
return from;
}
public HollowEffigy.Field getTo() {
return to;
}
public int getFromIdx() {
return fromIdx;
}
public int getToIdx() {
return toIdx;
}
public boolean isLeafNode() {
if(from != null && from.getValue() != null)
return from.isLeafNode();
return to == null || to.isLeafNode();
}
public boolean isDiff() {
return isDiff;
}
public boolean isOrderingDiff() {
return fromIdx != toIdx;
}
}
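// Entry point used by the diff view: selects a pairing strategy from the schema type of the records
// being compared (object fields by name, maps/collections by match hint or minimum difference),
// falling back to a null-partner pairing when either side is absent.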
public static List<EffigyFieldPair> pair(HollowEffigy from, HollowEffigy to, Map<String, PrimaryKey> matchHints) {
if(from == null || to == null)
return new HollowEffigyNullPartnerPairer(from, to).pair();
if(from.getDataAccess() == null)
return new HollowEffigyObjectPairer(from, to).pair();
HollowSchema schema = from.getDataAccess().getSchema();
switch(schema.getSchemaType()) {
case OBJECT:
return new HollowEffigyObjectPairer(from, to).pair();
case MAP:
String keyType = ((HollowMapSchema)schema).getKeyType();
return new HollowEffigyMapPairer(from, to, matchHints.get(keyType)).pair();
case LIST:
case SET:
String elementType = ((HollowCollectionSchema)schema).getElementType();
return new HollowEffigyCollectionPairer(from, to, matchHints.get(elementType)).pair();
}
throw new IllegalArgumentException("I don't know how to pair fields for type " + schema.getName() + "(" + schema.getSchemaType() + ")");
}
}
| 9,503 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/HollowEffigyObjectPairer.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer;
import com.netflix.hollow.diffview.effigy.HollowEffigy;
import com.netflix.hollow.diffview.effigy.HollowEffigy.Field;
import java.util.ArrayList;
import java.util.List;
public class HollowEffigyObjectPairer extends HollowEffigyFieldPairer {
public HollowEffigyObjectPairer(HollowEffigy fromObject, HollowEffigy toObject) {
super(fromObject, toObject);
}
@Override
public List<EffigyFieldPair> pair() {
List<EffigyFieldPair> fieldPairs = new ArrayList<EffigyFieldPair>();
for(Field fromField : from.getFields()) {
fieldPairs.add(new EffigyFieldPair(fromField, getField(to, fromField.getFieldName()), -1, -1));
}
for(Field toField : to.getFields()) {
Field fromField = getField(from, toField.getFieldName());
if(fromField == null)
fieldPairs.add(new EffigyFieldPair(null, toField, -1, -1));
}
return fieldPairs;
}
public Field getField(HollowEffigy effigy, String fieldName) {
for(Field field : effigy.getFields()) {
if(field.getFieldName().equals(fieldName))
return field;
}
return null;
}
}
| 9,504 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/HollowEffigyNullPartnerPairer.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer;
import com.netflix.hollow.diffview.effigy.HollowEffigy;
import java.util.ArrayList;
import java.util.List;
public class HollowEffigyNullPartnerPairer extends HollowEffigyFieldPairer {
public HollowEffigyNullPartnerPairer(HollowEffigy from, HollowEffigy to) {
super(from, to);
}
@Override
public List<EffigyFieldPair> pair() {
List<EffigyFieldPair> pairs = new ArrayList<EffigyFieldPair>();
if(from != null) {
for(int i=0;i<from.getFields().size();i++) {
pairs.add(new EffigyFieldPair(from.getFields().get(i), null, i, -1));
}
} else if(to != null) {
for(int i=0;i<to.getFields().size();i++) {
pairs.add(new EffigyFieldPair(null, to.getFields().get(i), -1, i));
}
}
return pairs;
}
}
| 9,505 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/HollowEffigyMapPairer.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer;
import com.netflix.hollow.core.index.key.PrimaryKey;
import com.netflix.hollow.diffview.effigy.HollowEffigy;
public class HollowEffigyMapPairer extends HollowEffigyCollectionPairer {
public HollowEffigyMapPairer(HollowEffigy fromCollection, HollowEffigy toCollection, PrimaryKey matchHint) {
super(fromCollection, toCollection, matchHint);
}
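// Map entries are matched on their key: getComparisonEffigy returns the first field of the entry
// effigy, which is expected to be the map key rather than the full key/value entry.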
@Override
protected HollowEffigy getComparisonEffigy(HollowEffigy effigy) {
return (HollowEffigy) effigy.getFields().get(0).getValue();
}
}
| 9,506 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/exact/DiffExactRecordMatcher.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer.exact;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
import com.netflix.hollow.tools.diff.exact.DiffEqualOrdinalMap;
import com.netflix.hollow.tools.diff.exact.DiffEqualOrdinalMap.MatchIterator;
import com.netflix.hollow.tools.diff.exact.DiffEqualityMapping;
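/**
* Exact-match check backed by a precomputed {@link DiffEqualityMapping}: two records match when the
* "to" ordinal appears in the equal-ordinal map for the "from" record's type and ordinal.
*/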
public class DiffExactRecordMatcher implements ExactRecordMatcher {
private final DiffEqualityMapping equalityMapping;
public DiffExactRecordMatcher(DiffEqualityMapping equalityMapping) {
this.equalityMapping = equalityMapping;
}
@Override
public boolean isExactMatch(HollowTypeDataAccess fromType, int fromOrdinal, HollowTypeDataAccess toType, int toOrdinal) {
if(fromType == null || toType == null)
return false;
DiffEqualOrdinalMap typeMap = equalityMapping.getEqualOrdinalMap(fromType.getSchema().getName());
if(typeMap != null) {
MatchIterator matchingToOrdinals = typeMap.getEqualOrdinals(fromOrdinal);
while(matchingToOrdinals.hasNext()) {
if(toOrdinal == matchingToOrdinals.next())
return true;
}
}
return false;
}
}
| 9,507 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/exact/ExactRecordMatcher.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer.exact;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
public interface ExactRecordMatcher {
public boolean isExactMatch(HollowTypeDataAccess fromType, int fromOrdinal, HollowTypeDataAccess toType, int toOrdinal);
}
| 9,508 |
0 |
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer
|
Create_ds/hollow/hollow-diff-ui/src/main/java/com/netflix/hollow/diffview/effigy/pairer/exact/HistoryExactRecordMatcher.java
|
/*
* Copyright 2016-2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.hollow.diffview.effigy.pairer.exact;
import com.netflix.hollow.core.read.dataaccess.HollowTypeDataAccess;
public class HistoryExactRecordMatcher implements ExactRecordMatcher {
public static final HistoryExactRecordMatcher INSTANCE = new HistoryExactRecordMatcher();
private HistoryExactRecordMatcher() { }
@Override
public boolean isExactMatch(HollowTypeDataAccess fromType, int fromOrdinal, HollowTypeDataAccess toType, int toOrdinal) {
return fromType != null && fromType == toType && fromOrdinal == toOrdinal;
}
}
| 9,509 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorPlugin.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
/**
* Hive plugin.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "hive";
private static final HiveTypeConverter HIVE_TYPE_CONVERTER = new HiveTypeConverter();
private static final HiveConnectorInfoConverter INFO_CONVERTER_HIVE
= new HiveConnectorInfoConverter(HIVE_TYPE_CONVERTER);
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(final ConnectorContext connectorContext) {
return new HiveConnectorFactory(
INFO_CONVERTER_HIVE,
connectorContext
);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return HIVE_TYPE_CONVERTER;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorInfoConverter getInfoConverter() {
return INFO_CONVERTER_HIVE;
}
}
| 9,510 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.SpringConnectorFactory;
import com.netflix.metacat.connector.hive.configs.CacheConfig;
import com.netflix.metacat.connector.hive.configs.HiveConnectorClientConfig;
import com.netflix.metacat.connector.hive.configs.HiveConnectorConfig;
import com.netflix.metacat.connector.hive.configs.HiveConnectorFastServiceConfig;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.springframework.core.env.MapPropertySource;
import java.util.HashMap;
import java.util.Map;
/**
* HiveConnectorFactory.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorFactory extends SpringConnectorFactory {
/**
* Constructor.
*
* @param infoConverter hive info converter
* @param connectorContext connector config
*/
HiveConnectorFactory(
final HiveConnectorInfoConverter infoConverter,
final ConnectorContext connectorContext
) {
super(infoConverter, connectorContext);
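// Flags read from the connector configuration (keys defined in HiveConfigConstants); note that the
// fast hive service is only honored when the embedded/local metastore is also enabled.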
final boolean useLocalMetastore = Boolean.parseBoolean(
connectorContext.getConfiguration()
.getOrDefault(HiveConfigConstants.USE_EMBEDDED_METASTORE, "false")
);
final boolean useFastHiveService = useLocalMetastore && Boolean.parseBoolean(
connectorContext.getConfiguration()
.getOrDefault(HiveConfigConstants.USE_FASTHIVE_SERVICE, "false")
);
final Map<String, Object> properties = new HashMap<>();
properties.put("useHiveFastService", useFastHiveService);
properties.put("useEmbeddedClient", useLocalMetastore);
properties.put("metacat.cache.enabled", connectorContext.getConfig().isCacheEnabled());
super.addEnvProperties(new MapPropertySource("HIVE_CONNECTOR", properties));
super.registerClazz(HiveConnectorFastServiceConfig.class,
HiveConnectorClientConfig.class, HiveConnectorConfig.class, CacheConfig.class);
super.refresh();
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorDatabaseService getDatabaseService() {
return this.ctx.getBean(HiveConnectorDatabaseService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTableService getTableService() {
return this.ctx.getBean(HiveConnectorTableService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorPartitionService getPartitionService() {
return this.ctx.getBean(HiveConnectorPartitionService.class);
}
}
| 9,511 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorTableService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Hive connector base table service implementation.
*
* @author zhenl
* @since 1.0.0
*/
@Getter
@Slf4j
public class HiveConnectorTableService implements ConnectorTableService {
private static final String PARAMETER_EXTERNAL = "EXTERNAL";
protected final HiveConnectorInfoConverter hiveMetacatConverters;
protected final ConnectorContext connectorContext;
private final String catalogName;
private final IMetacatHiveClient metacatHiveClient;
private final HiveConnectorDatabaseService hiveConnectorDatabaseService;
private final boolean allowRenameTable;
private final boolean onRenameConvertToExternal;
/**
* Constructor.
*
* @param catalogName catalog name
* @param metacatHiveClient hive client
* @param hiveConnectorDatabaseService hive database service
* @param hiveMetacatConverters converter
* @param connectorContext the connector context
*/
public HiveConnectorTableService(
final String catalogName,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final HiveConnectorInfoConverter hiveMetacatConverters,
final ConnectorContext connectorContext
) {
this.metacatHiveClient = metacatHiveClient;
this.hiveMetacatConverters = hiveMetacatConverters;
this.hiveConnectorDatabaseService = hiveConnectorDatabaseService;
this.catalogName = catalogName;
this.allowRenameTable = Boolean.parseBoolean(
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.ALLOW_RENAME_TABLE, "false")
);
this.onRenameConvertToExternal = Boolean.parseBoolean(
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.ON_RENAME_CONVERT_TO_EXTERNAL,
"true")
);
this.connectorContext = connectorContext;
}
/**
* getTable.
*
* @param requestContext The request context
* @param name The qualified name of the resource to get
* @return table dto
*/
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), name.getTableName());
return hiveMetacatConverters.toTableInfo(name, table);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed get hive table %s", name), exception);
}
}
/**
* Create a table.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
try {
final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
updateTable(requestContext, table, tableInfo);
metacatHiveClient.createTable(table);
} catch (AlreadyExistsException exception) {
throw new TableAlreadyExistsException(tableName, exception);
} catch (MetaException | InvalidObjectException exception) {
//the NoSuchObjectException is converted into InvalidObjectException in hive client
if (exception.getMessage().startsWith(tableName.getDatabaseName())) {
throw new DatabaseNotFoundException(
QualifiedName.ofDatabase(tableName.getCatalogName(),
tableName.getDatabaseName()), exception);
} else {
//invalid table name or column definition exception
throw new InvalidMetaException(tableName, exception);
}
} catch (TException exception) {
throw new ConnectorException(String.format("Failed create hive table %s", tableName), exception);
}
}
void updateTable(
final ConnectorRequestContext requestContext,
final Table table,
final TableInfo tableInfo
) throws MetaException {
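// Copies metadata from the Metacat TableInfo onto the Hive Table: table parameters (marking plain
// tables EXTERNAL), a short-circuit for iceberg tables, then the storage descriptor (location,
// serde, input/output formats, sd parameters), the column list and the partition keys.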
if (table.getParameters() == null || table.getParameters().isEmpty()) {
table.setParameters(Maps.newHashMap());
}
//if this is a regular table, mark it as an external table;
//otherwise (e.g. VIRTUAL_VIEW) leave it as is
if (!isVirtualView(table)) {
table.getParameters().putIfAbsent(PARAMETER_EXTERNAL, "TRUE");
} else {
validAndUpdateVirtualView(table);
}
if (tableInfo.getMetadata() != null) {
table.getParameters().putAll(tableInfo.getMetadata());
}
//no other information is needed for iceberg table
if (connectorContext.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
table.setPartitionKeys(Collections.emptyList());
log.debug("Skipping seder and set partition key to empty when updating iceberg table in hive");
return;
}
//storage
final StorageDescriptor sd = table.getSd() != null ? table.getSd() : new StorageDescriptor();
String inputFormat = null;
String outputFormat = null;
Map<String, String> sdParameters = Maps.newHashMap();
final String location =
tableInfo.getSerde() == null ? null : tableInfo.getSerde().getUri();
if (location != null) {
sd.setLocation(location);
} else if (sd.getLocation() == null) {
final String locationStr = hiveConnectorDatabaseService.get(requestContext,
QualifiedName.ofDatabase(tableInfo.getName().
getCatalogName(), tableInfo.getName().getDatabaseName())).getUri();
final Path databasePath = new Path(locationStr);
final Path targetPath = new Path(databasePath, tableInfo.getName().getTableName());
sd.setLocation(targetPath.toString());
}
if (sd.getSerdeInfo() == null) {
sd.setSerdeInfo(new SerDeInfo());
}
final SerDeInfo serdeInfo = sd.getSerdeInfo();
serdeInfo.setName(tableInfo.getName().getTableName());
final StorageInfo storageInfo = tableInfo.getSerde();
if (storageInfo != null) {
if (!Strings.isNullOrEmpty(storageInfo.getSerializationLib())) {
serdeInfo.setSerializationLib(storageInfo.getSerializationLib());
}
if (storageInfo.getSerdeInfoParameters() != null && !storageInfo.getSerdeInfoParameters().isEmpty()) {
serdeInfo.setParameters(storageInfo.getSerdeInfoParameters());
}
inputFormat = storageInfo.getInputFormat();
outputFormat = storageInfo.getOutputFormat();
if (storageInfo.getParameters() != null && !storageInfo.getParameters().isEmpty()) {
sdParameters = storageInfo.getParameters();
}
} else if (table.getSd() != null) {
final HiveStorageFormat hiveStorageFormat = this.extractHiveStorageFormat(table);
serdeInfo.setSerializationLib(hiveStorageFormat.getSerde());
serdeInfo.setParameters(ImmutableMap.of());
inputFormat = hiveStorageFormat.getInputFormat();
outputFormat = hiveStorageFormat.getOutputFormat();
}
final ImmutableList.Builder<FieldSchema> columnsBuilder = ImmutableList.builder();
final ImmutableList.Builder<FieldSchema> partitionKeysBuilder = ImmutableList.builder();
if (tableInfo.getFields() != null) {
for (FieldInfo column : tableInfo.getFields()) {
final FieldSchema field = hiveMetacatConverters.metacatToHiveField(column);
if (column.isPartitionKey()) {
partitionKeysBuilder.add(field);
} else {
columnsBuilder.add(field);
}
}
}
final ImmutableList<FieldSchema> columns = columnsBuilder.build();
if (!columns.isEmpty()) {
sd.setCols(columns);
}
if (!Strings.isNullOrEmpty(inputFormat)) {
sd.setInputFormat(inputFormat);
}
if (!Strings.isNullOrEmpty(outputFormat)) {
sd.setOutputFormat(outputFormat);
}
if (sd.getParameters() == null) {
sd.setParameters(sdParameters);
}
//partition keys
final ImmutableList<FieldSchema> partitionKeys = partitionKeysBuilder.build();
if (!partitionKeys.isEmpty()) {
table.setPartitionKeys(partitionKeys);
}
table.setSd(sd);
}
private void validAndUpdateVirtualView(final Table table) {
if (isVirtualView(table)
&& Strings.isNullOrEmpty(table.getViewOriginalText())) {
throw new MetacatBadRequestException(
String.format("Invalid view creation for %s/%s. Missing viewOrginialText",
table.getDbName(),
table.getDbName()));
}
if (Strings.isNullOrEmpty(table.getViewExpandedText())) {
//set viewExpandedText to viewOriginalText
table.setViewExpandedText(table.getViewOriginalText());
}
//set a dummy location on the view to avoid issues with org.apache.hadoop.fs.Path when dropping the view
if (Strings.isNullOrEmpty(table.getSd().getLocation())) {
table.getSd().setLocation("file://tmp/" + table.getDbName() + "/" + table.getTableName());
}
}
private boolean isVirtualView(final Table table) {
return null != table.getTableType()
&& table.getTableType().equals(TableType.VIRTUAL_VIEW.toString());
}
/**
* Delete a table with the given qualified name.
*
* @param requestContext The request context
* @param name The qualified name of the resource to delete
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
metacatHiveClient.dropTable(name.getDatabaseName(), name.getTableName());
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed delete hive table %s", name), exception);
}
}
/**
* Update a resource with the given metadata.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
final Table existingTable = hiveMetacatConverters.fromTableInfo(get(requestContext, tableInfo.getName()));
update(requestContext, existingTable, tableInfo);
}
protected void update(final ConnectorRequestContext requestContext,
final Table existingTable, final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
try {
updateTable(requestContext, existingTable, tableInfo);
metacatHiveClient.alterTable(tableName.getDatabaseName(),
tableName.getTableName(),
existingTable);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(tableName, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed update hive table %s", tableName), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : null;
for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
if (tableFilter == null || tableName.startsWith(tableFilter)) {
final QualifiedName qualifiedName =
QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
qualifiedNames.add(qualifiedName);
}
}
//supporting sort by qualified name only
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed listNames hive table %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<TableInfo> list(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<TableInfo> tableInfos = Lists.newArrayList();
for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), tableName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), tableName);
tableInfos.add(hiveMetacatConverters.toTableInfo(name, table));
}
//supporting sort by name only
if (sort != null) {
ConnectorUtils.sort(tableInfos, sort, Comparator.comparing(p -> p.getName().getTableName()));
}
return ConnectorUtils.paginate(tableInfos, pageable);
} catch (MetaException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed list hive table %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
boolean result;
try {
result = metacatHiveClient.getTableByName(name.getDatabaseName(), name.getTableName()) != null;
} catch (NoSuchObjectException exception) {
result = false;
} catch (TException exception) {
throw new ConnectorException(String.format("Failed exists hive table %s", name), exception);
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public void rename(
final ConnectorRequestContext context,
final QualifiedName oldName,
final QualifiedName newName
) {
if (!allowRenameTable) {
throw new ConnectorException(
"Renaming tables is disabled in catalog " + catalogName, null);
}
try {
if (onRenameConvertToExternal) {
//
// If this is a managed table(EXTERNAL=FALSE), then convert it to an external table before renaming it.
// We do not want the metastore to move the location/data.
//
final Table table = metacatHiveClient.getTableByName(oldName.getDatabaseName(), oldName.getTableName());
Map<String, String> parameters = table.getParameters();
if (parameters == null) {
parameters = Maps.newHashMap();
table.setParameters(parameters);
}
if (!parameters.containsKey(PARAMETER_EXTERNAL)
|| parameters.get(PARAMETER_EXTERNAL).equalsIgnoreCase("FALSE")) {
parameters.put(PARAMETER_EXTERNAL, "TRUE");
metacatHiveClient.alterTable(oldName.getDatabaseName(), oldName.getTableName(), table);
}
}
metacatHiveClient.rename(oldName.getDatabaseName(), oldName.getTableName(),
newName.getDatabaseName(), newName.getTableName());
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(oldName, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(newName, exception);
} catch (TException exception) {
throw new ConnectorException(
"Failed renaming from hive table" + oldName.toString()
+ " to hive talbe " + newName.toString(), exception);
}
}
private HiveStorageFormat extractHiveStorageFormat(final Table table) throws MetaException {
final StorageDescriptor descriptor = table.getSd();
if (descriptor == null) {
throw new MetaException("Table is missing storage descriptor");
}
final SerDeInfo serdeInfo = descriptor.getSerdeInfo();
if (serdeInfo == null) {
throw new MetaException(
"Table storage descriptor is missing SerDe info");
}
final String outputFormat = descriptor.getOutputFormat();
final String serializationLib = serdeInfo.getSerializationLib();
for (HiveStorageFormat format : HiveStorageFormat.values()) {
if (format.getOutputFormat().equals(outputFormat) && format.getSerde().equals(serializationLib)) {
return format;
}
}
throw new MetaException(
String.format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib));
}
@Override
public List<QualifiedName> getTableNames(
final ConnectorRequestContext context,
final QualifiedName name,
final String filter,
@Nullable final Integer limit) {
try {
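// With a database-qualified name, delegate directly; otherwise walk every database, decrementing
// the remaining limit by the number of names returned so the overall result honors the caller's limit.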
if (name.isDatabaseDefinition()) {
return metacatHiveClient.getTableNames(name.getDatabaseName(), filter, limit == null ? -1 : limit)
.stream()
.map(n -> QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), n))
.collect(Collectors.toList());
} else {
int limitSize = limit == null || limit < 0 ? Integer.MAX_VALUE : limit;
final List<String> databaseNames = metacatHiveClient.getAllDatabases();
final List<QualifiedName> result = Lists.newArrayList();
for (int i = 0; i < databaseNames.size() && limitSize > 0; i++) {
final String databaseName = databaseNames.get(i);
final List<String> tableNames =
metacatHiveClient.getTableNames(databaseName, filter, limitSize);
limitSize = limitSize - tableNames.size();
result.addAll(tableNames.stream()
.map(n -> QualifiedName.ofTable(name.getCatalogName(), databaseName, n))
.collect(Collectors.toList()));
}
return result;
}
} catch (TException e) {
final String message = String.format("Failed getting the table names for database %s", name);
log.error(message, e);
throw new ConnectorException(message, e);
}
}
}
| 9,512 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveStorageFormat.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NonNull;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat;
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
/**
* Hive storage format.
*
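 * <p>Each constant pairs a SerDe with its Hadoop input and output format classes, exposed through the
 * Lombok-generated getters as fully qualified class names. A minimal illustrative sketch (this helper
 * is hypothetical; it mirrors the lookup the connector performs when extracting a table's storage
 * format):
 * <pre>{@code
 * static HiveStorageFormat fromDescriptor(String outputFormat, String serializationLib) {
 *     for (HiveStorageFormat format : HiveStorageFormat.values()) {
 *         if (format.getOutputFormat().equals(outputFormat)
 *             && format.getSerde().equals(serializationLib)) {
 *             return format;
 *         }
 *     }
 *     throw new IllegalArgumentException("Unsupported format: " + outputFormat);
 * }
 * }</pre>
 *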
* @author zhenl
* @since 1.0.0
*/
@Getter
@AllArgsConstructor
public enum HiveStorageFormat {
/**
* Optimized Row Columnar.
*/
ORC(OrcSerde.class.getName(),
OrcInputFormat.class.getName(),
OrcOutputFormat.class.getName()),
/**
* PARQUET.
*/
PARQUET(ParquetHiveSerDe.class.getName(),
MapredParquetInputFormat.class.getName(),
MapredParquetOutputFormat.class.getName()),
/**
* RCBINARY.
*/
RCBINARY(LazyBinaryColumnarSerDe.class.getName(),
RCFileInputFormat.class.getName(),
RCFileOutputFormat.class.getName()),
/**
* RCTEXT.
*/
RCTEXT(ColumnarSerDe.class.getName(),
RCFileInputFormat.class.getName(),
RCFileOutputFormat.class.getName()),
/**
* SEQUENCEFILE.
*/
SEQUENCEFILE(LazySimpleSerDe.class.getName(),
SequenceFileInputFormat.class.getName(),
HiveSequenceFileOutputFormat.class.getName()),
/**
* TEXTFILE.
*/
TEXTFILE(LazySimpleSerDe.class.getName(),
TextInputFormat.class.getName(),
HiveIgnoreKeyTextOutputFormat.class.getName());
@NonNull
private final String serde;
@NonNull
private final String inputFormat;
@NonNull
private final String outputFormat;
}
| 9,513 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorPartitionService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.sql.PartitionHolder;
import lombok.Getter;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* HiveConnectorPartitionService.
*
* @author zhenl
* @since 1.0.0
*/
@Getter
public class HiveConnectorPartitionService implements ConnectorPartitionService {
protected final ConnectorContext context;
private final String catalogName;
private final HiveConnectorInfoConverter hiveMetacatConverters;
private final IMetacatHiveClient metacatHiveClient;
/**
* Constructor.
*
* @param context connector context
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
*/
public HiveConnectorPartitionService(
final ConnectorContext context,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters
) {
this.metacatHiveClient = metacatHiveClient;
this.hiveMetacatConverters = hiveMetacatConverters;
this.catalogName = context.getCatalogName();
this.context = context;
}
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
final List<Partition> partitions = getPartitions(tableName,
partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
partitionsRequest.getSort(), partitionsRequest.getPageable());
final List<PartitionInfo> partitionInfos = new ArrayList<>();
for (Partition partition : partitions) {
partitionInfos.add(hiveMetacatConverters.toPartitionInfo(tableInfo, partition));
}
return partitionInfos;
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final TableInfo tableInfo
) {
try {
return metacatHiveClient.getPartitionCount(tableName.getDatabaseName(), tableName.getTableName());
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("Invalid metadata for " + tableName, e);
} catch (TException e) {
throw new ConnectorException(String.format("Failed get partitions count for hive table %s", tableName), e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
final String filterExpression = partitionsRequest.getFilter();
final List<String> partitionIds = partitionsRequest.getPartitionNames();
List<String> names = Lists.newArrayList();
final Pageable pageable = partitionsRequest.getPageable();
try {
if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(),
tableName.getTableName());
for (Partition partition : getPartitions(tableName, filterExpression,
partitionIds, partitionsRequest.getSort(), pageable)) {
names.add(getNameOfPartition(table, partition));
}
} else {
names = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
return ConnectorUtils.paginate(names, pageable);
}
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("Invalid metadata for " + tableName, e);
} catch (TException e) {
throw new ConnectorException(String.format("Failed get partitions keys for hive table %s", tableName), e);
}
return names;
}
private List<Partition> getPartitions(
final QualifiedName tableName,
@Nullable final String filter,
@Nullable final List<String> partitionIds,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String databasename = tableName.getDatabaseName();
final String tablename = tableName.getTableName();
try {
final Table table = metacatHiveClient.getTableByName(databasename, tablename);
List<Partition> partitionList = null;
if (!Strings.isNullOrEmpty(filter)) {
partitionList = metacatHiveClient.listPartitionsByFilter(databasename,
tablename, filter);
} else {
if (partitionIds != null) {
partitionList = metacatHiveClient.getPartitions(databasename,
tablename, partitionIds);
}
if (partitionList == null || partitionList.isEmpty()) {
partitionList = metacatHiveClient.getPartitions(databasename,
tablename, null);
}
}
final List<Partition> filteredPartitionList = Lists.newArrayList();
partitionList.forEach(partition -> {
final String partitionName = getNameOfPartition(table, partition);
if (partitionIds == null || partitionIds.contains(partitionName)) {
filteredPartitionList.add(partition);
}
});
if (sort != null) {
if (sort.getOrder() == SortOrder.DESC) {
filteredPartitionList.sort(Collections.reverseOrder());
} else {
Collections.sort(filteredPartitionList);
}
}
return ConnectorUtils.paginate(filteredPartitionList, pageable);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("Invalid metadata for " + tableName, e);
} catch (TException e) {
throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionUris(
final ConnectorRequestContext requestContext,
final QualifiedName table,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo
) {
final List<String> uris = Lists.newArrayList();
for (Partition partition : getPartitions(table, partitionsRequest.getFilter(),
partitionsRequest.getPartitionNames(), partitionsRequest.getSort(), partitionsRequest.getPageable())) {
uris.add(partition.getSd().getLocation());
}
return uris;
}
/**
     * By default (checkIfExists=true and alterIfExists=false), this method adds the provided list of partitions.
     * If a partition already exists, it is dropped first before being re-added.
     * If checkIfExists=false, the method adds the partitions to the table. If a partition already exists,
     * an AlreadyExistsException error is thrown.
     * If alterIfExists=true, the method updates existing partitions and adds non-existent partitions.
     * If a partition in the provided partition list has all the details, then it is used. If the details are missing,
     * then the table details are inherited. This is mostly for the storage information.
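     * <p>A minimal illustrative sketch of how the flags interact (the request constructor and setter
     * names below are assumed to mirror the getters used by this method; they are not verified here):
     * <pre>{@code
     * PartitionsSaveRequest request = new PartitionsSaveRequest();
     * request.setPartitions(partitionInfos);  // partitions to add or update
     * request.setCheckIfExists(true);         // default: re-add existing partitions whose location changed
     * request.setAlterIfExists(true);         // alter existing partitions in place instead of drop/re-add
     * PartitionsSaveResponse response = savePartitions(requestContext, tableQName, request);
     * // the response reports the added and updated partition names (see setAdded/setUpdated below)
     * }</pre>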
*/
@Override
public PartitionsSaveResponse savePartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableQName,
final PartitionsSaveRequest partitionsSaveRequest
) {
final String databaseName = tableQName.getDatabaseName();
final String tableName = tableQName.getTableName();
final Table table;
try {
table = metacatHiveClient.getTableByName(databaseName, tableName);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableQName, exception);
} catch (TException e) {
throw new ConnectorException(String.format("Failed getting hive table %s", tableQName), e);
}
// New partitions
final List<PartitionInfo> addedPartitionInfos = Lists.newArrayList();
final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
final List<String> partitionNames = partitionInfos.stream()
.map(part -> {
final String partitionName = part.getName().getPartitionName();
PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
return partitionName;
}).collect(Collectors.toList());
// New partition names
final List<String> addedPartitionNames = Lists.newArrayList();
// Updated partition names
final List<String> existingPartitionNames = Lists.newArrayList();
// Existing partitions
final List<PartitionHolder> existingPartitionHolders = Lists.newArrayList();
// Existing partition map
Map<String, PartitionHolder> existingPartitionMap = Collections.emptyMap();
//
// If either checkIfExists or alterIfExists is true, check to see if any of the partitions already exists.
// If it exists and if alterIfExists=false, we will drop it before adding.
// If it exists and if alterIfExists=true, we will alter it.
//
if (partitionsSaveRequest.getCheckIfExists() || partitionsSaveRequest.getAlterIfExists()) {
existingPartitionMap = getPartitionsByNames(table, partitionNames);
}
for (PartitionInfo partitionInfo : partitionInfos) {
final String partitionName = partitionInfo.getName().getPartitionName();
final PartitionHolder existingPartitionHolder = existingPartitionMap.get(partitionName);
if (existingPartitionHolder == null) {
addedPartitionNames.add(partitionName);
addedPartitionInfos.add(partitionInfo);
} else {
final String partitionUri =
partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
final String existingPartitionUri = getPartitionUri(existingPartitionHolder);
if (partitionUri == null || !partitionUri.equals(existingPartitionUri)) {
existingPartitionNames.add(partitionName);
                    // Copy over the existing partition details wherever the incoming partition info is missing them.
if (partitionInfo.getSerde() == null) {
partitionInfo.setSerde(new StorageInfo());
}
if (partitionInfo.getAudit() == null) {
partitionInfo.setAudit(new AuditInfo());
}
if (StringUtils.isBlank(partitionUri)) {
partitionInfo.getSerde().setUri(existingPartitionUri);
}
                    // The partition already exists; leave it untouched unless alterIfExists is set.
if (partitionsSaveRequest.getAlterIfExists()) {
if (existingPartitionHolder.getPartition() != null) {
final Partition existingPartition = existingPartitionHolder.getPartition();
partitionInfo.getSerde().setParameters(existingPartition.getParameters());
partitionInfo.getAudit().setCreatedDate(
HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getCreateTime()));
partitionInfo.getAudit().setLastModifiedDate(
HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getLastAccessTime()));
} else {
final PartitionInfo existingPartitionInfo = existingPartitionHolder.getPartitionInfo();
if (existingPartitionInfo.getSerde() != null) {
partitionInfo.getSerde()
.setParameters(existingPartitionInfo.getSerde().getParameters());
}
if (existingPartitionInfo.getAudit() != null) {
partitionInfo.getAudit()
.setCreatedDate(existingPartitionInfo.getAudit().getCreatedDate());
partitionInfo.getAudit()
.setLastModifiedDate(existingPartitionInfo.getAudit().getLastModifiedDate());
}
}
existingPartitionHolder.setPartitionInfo(partitionInfo);
existingPartitionHolders.add(existingPartitionHolder);
} else {
addedPartitionInfos.add(partitionInfo);
}
}
}
}
final Set<String> deletePartitionNames = Sets.newHashSet();
if (!partitionsSaveRequest.getAlterIfExists()) {
deletePartitionNames.addAll(existingPartitionNames);
}
if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
deletePartitionNames.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
}
addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos, existingPartitionHolders,
deletePartitionNames);
final PartitionsSaveResponse result = new PartitionsSaveResponse();
result.setAdded(addedPartitionNames);
result.setUpdated(existingPartitionNames);
return result;
}
protected void addUpdateDropPartitions(final QualifiedName tableQName,
final Table table,
final List<String> partitionNames,
final List<PartitionInfo> addedPartitionInfos,
final List<PartitionHolder> existingPartitionInfos,
final Set<String> deletePartitionNames) {
final String databaseName = table.getDbName();
final String tableName = table.getTableName();
final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableQName, table);
try {
final List<Partition> existingPartitions = existingPartitionInfos.stream()
.map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p.getPartitionInfo()))
.collect(Collectors.toList());
final List<Partition> addedPartitions = addedPartitionInfos.stream()
.map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p)).collect(Collectors.toList());
// If alterIfExists=true, then alter partitions if they already exists
if (!existingPartitionInfos.isEmpty()) {
copyTableSdToPartitionSd(existingPartitions, table);
metacatHiveClient.alterPartitions(databaseName,
tableName, existingPartitions);
}
// Copy the storage details from the table if the partition does not contain the details.
copyTableSdToPartitionSd(addedPartitions, table);
// Drop partitions with ids in 'deletePartitionNames' and add 'addedPartitionInfos' partitions
metacatHiveClient.addDropPartitions(databaseName,
tableName, addedPartitions, Lists.newArrayList(deletePartitionNames));
} catch (NoSuchObjectException exception) {
if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
throw new PartitionNotFoundException(tableQName, "", exception);
} else {
throw new TableNotFoundException(tableQName, exception);
}
} catch (MetaException | InvalidObjectException exception) {
throw new InvalidMetaException("One or more partitions are invalid.", exception);
} catch (AlreadyExistsException e) {
throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
}
}
private String getPartitionUri(final PartitionHolder partition) {
String result = null;
if (partition.getPartition() != null) {
final Partition hivePartition = partition.getPartition();
result = hivePartition.getSd() != null ? hivePartition.getSd().getLocation() : null;
} else if (partition.getPartitionInfo() != null) {
final PartitionInfo partitionInfo = partition.getPartitionInfo();
result = partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public void deletePartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final List<String> partitionNames,
final TableInfo tableInfo
) {
try {
metacatHiveClient.dropPartitions(tableName.getDatabaseName(), tableName.getTableName(), partitionNames);
} catch (MetaException | NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (InvalidObjectException e) {
throw new InvalidMetaException("One or more partitions are invalid.", e);
} catch (TException e) {
//not sure which qualified name to use here
throw new ConnectorException(String.format("Failed delete partitions for hive table %s", tableName), e);
}
}
/**
* Returns the list of partition keys.
*
* @param fields fields
* @return partition keys
*/
protected List<String> getPartitionKeys(final List<FieldSchema> fields) {
return (fields != null) ? fields.stream().map(FieldSchema::getName).collect(Collectors.toList())
: Lists.newArrayList();
}
protected Map<String, PartitionHolder> getPartitionsByNames(final Table table,
final List<String> partitionNames) {
final String databasename = table.getDbName();
final String tablename = table.getTableName();
try {
final List<Partition> partitions =
metacatHiveClient.getPartitions(databasename, tablename, partitionNames);
return partitions.stream().map(PartitionHolder::new).collect(Collectors.toMap(part -> {
try {
return Warehouse.makePartName(table.getPartitionKeys(), part.getPartition().getValues());
} catch (Exception e) {
throw new InvalidMetaException("One or more partition names are invalid.", e);
}
}, Function.identity()));
} catch (Exception e) {
throw new InvalidMetaException("One or more partition names are invalid.", e);
}
}
private void copyTableSdToPartitionSd(final List<Partition> hivePartitions, final Table table) {
//
// Update the partition info based on that of the table.
//
for (Partition partition : hivePartitions) {
final StorageDescriptor sd = partition.getSd();
final StorageDescriptor tableSdCopy = table.getSd().deepCopy();
if (tableSdCopy.getSerdeInfo() == null) {
final SerDeInfo serDeInfo = new SerDeInfo(null, null, new HashMap<>());
tableSdCopy.setSerdeInfo(serDeInfo);
}
tableSdCopy.setLocation(sd.getLocation());
if (!Strings.isNullOrEmpty(sd.getInputFormat())) {
tableSdCopy.setInputFormat(sd.getInputFormat());
}
if (!Strings.isNullOrEmpty(sd.getOutputFormat())) {
tableSdCopy.setOutputFormat(sd.getOutputFormat());
}
if (sd.getParameters() != null && !sd.getParameters().isEmpty()) {
tableSdCopy.setParameters(sd.getParameters());
}
if (sd.getSerdeInfo() != null) {
if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) {
tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName());
}
if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib());
}
if (sd.getSerdeInfo().getParameters() != null && !sd.getSerdeInfo().getParameters().isEmpty()) {
tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters());
}
}
partition.setSd(tableSdCopy);
}
}
private String getNameOfPartition(final Table table, final Partition partition) {
try {
return Warehouse.makePartName(table.getPartitionKeys(), partition.getValues());
} catch (TException e) {
throw new InvalidMetaException("One or more partition names are invalid.", e);
}
}
}
| 9,514 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/IMetacatHiveClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.netflix.metacat.connector.hive.client.embedded.HivePrivilege;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Set;
/**
* IMetacatHiveClient.
*
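 * <p>Implementations typically wrap either a Thrift metastore client or an embedded metastore;
 * every default method throws {@link UnsupportedOperationException} until overridden. A minimal
 * illustrative sketch ({@code client} and the database/table names are hypothetical):
 * <pre>{@code
 * IMetacatHiveClient client = ...; // some concrete implementation
 * List<String> databases = client.getAllDatabases();
 * Table table = client.getTableByName("mydb", "mytable");
 * List<String> partitionNames = client.getPartitionNames("mydb", "mytable");
 * client.shutdown();
 * }</pre>
 *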
* @author zhenl
* @since 1.0.0
*/
public interface IMetacatHiveClient {
/**
* Standard error message for all default implementations.
*/
String UNSUPPORTED_MESSAGE = "Not supported for this client";
/**
* Create database.
*
* @param database database metadata
     * @throws TException if the database already exists or the call fails
*/
default void createDatabase(final Database database) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Drop database.
*
* @param dbName database name
     * @throws TException if the database is not found
*/
default void dropDatabase(final String dbName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Returns the database.
     *
     * @param databaseName database name
     * @return the database
     * @throws TException if the database is not found
*/
default Database getDatabase(final String databaseName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* List all databases.
*
* @return database list
* @throws TException exceptions
*/
default List<String> getAllDatabases() throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Get all tables.
*
     * @param databaseName database name
     * @return list of table names
     * @throws TException on metastore errors
*/
default List<String> getAllTables(final String databaseName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Get the names of the tables in a database that match the given filter, up to the given limit.
     *
     * @param databaseName database name
     * @param filter filter expression
     * @param limit maximum number of table names to return
     * @return list of table names
     * @throws TException on metastore errors
*/
default List<String> getTableNames(final String databaseName, final String filter, final int limit)
throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Returns the table.
*
* @param databaseName databaseName
* @param tableName tableName
* @return table information
     * @throws TException if the table is not found
*/
default Table getTableByName(final String databaseName,
final String tableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Create table.
*
     * @param table table metadata
     * @throws TException if the table already exists or the call fails
*/
default void createTable(final Table table) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Delete table.
*
* @param databaseName database
* @param tableName tableName
     * @throws TException if the table is not found
*/
default void dropTable(final String databaseName,
final String tableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Rename table.
*
     * @param databaseName database name
     * @param oldTableName old table name
     * @param newDatabaseName new database name
     * @param newTableName new table name
     * @throws TException if the table is not found
*/
default void rename(final String databaseName,
final String oldTableName,
                        final String newDatabaseName,
final String newTableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Update table.
*
* @param databaseName databaseName
* @param tableName tableName
* @param table table
* @throws TException if the database does not exist
*/
default void alterTable(final String databaseName,
final String tableName,
final Table table) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Update database.
     *
     * @param databaseName database name
     * @param database database metadata
* @throws TException if the database does not exist
*/
default void alterDatabase(final String databaseName,
final Database database) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Returns the partitions.
     *
     * @param databaseName databaseName
     * @param tableName tableName
     * @param partitionNames partition names
* @return list of partitions
* @throws TException TException
*/
default List<Partition> getPartitions(final String databaseName,
final String tableName,
@Nullable final List<String> partitionNames) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Drop a list of partitions.
*
* @param databaseName databaseName
* @param tableName tableName
* @param partitionNames partitionNames
* @throws TException TException
*/
default void dropPartitions(final String databaseName,
final String tableName,
final List<String> partitionNames) throws
TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* List partitions.
*
* @param databaseName databaseName
* @param tableName tableName
* @param filter filter
* @return List of partitions
* @throws TException TException
*/
default List<Partition> listPartitionsByFilter(final String databaseName,
final String tableName,
final String filter
) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Get partition count.
*
* @param databaseName databaseName
* @param tableName tableName
* @return partition count
* @throws TException TException
*/
default int getPartitionCount(final String databaseName,
final String tableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Get partition names.
*
* @param databaseName databaseName
* @param tableName tableName
* @return list of partition names
* @throws TException TException
*/
default List<String> getPartitionNames(final String databaseName,
final String tableName)
throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Save partitions.
*
* @param partitions partitions
* @throws TException TException
*/
default void savePartitions(final List<Partition> partitions)
throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Alter partitions.
*
* @param dbName databaseName
* @param tableName tableName
* @param partitions partitions
* @throws TException TException
*/
default void alterPartitions(final String dbName, final String tableName,
final List<Partition> partitions) throws
TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Add and drop partitions in a single operation.
     *
     * @param dbName database name
     * @param tableName table name
     * @param partitions partitions to add
     * @param delPartitionNames names of the partitions to delete
* @throws TException TException
*/
default void addDropPartitions(final String dbName, final String tableName,
final List<Partition> partitions,
final List<String> delPartitionNames) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* getDatabasePrivileges.
*
* @param user user
* @param databaseName databaseName
* @return set of privilege
*/
default Set<HivePrivilege> getDatabasePrivileges(String user, String databaseName) {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* getTablePrivileges.
*
* @param user user
     * @param tableName table name
* @return set of privilege
*/
default Set<HivePrivilege> getTablePrivileges(String user, String tableName) {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Clean up any held resources.
*
* @throws TException TException
*/
default void shutdown() throws TException {
}
}
| 9,515 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorDatabaseService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.Comparator;
import java.util.List;
/**
* HiveConnectorDatabaseService.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveConnectorDatabaseService implements ConnectorDatabaseService {
private final IMetacatHiveClient metacatHiveClient;
private final HiveConnectorInfoConverter hiveMetacatConverters;
/**
* Constructor.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
*/
public HiveConnectorDatabaseService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters
) {
this.metacatHiveClient = metacatHiveClient;
this.hiveMetacatConverters = hiveMetacatConverters;
}
/**
* {@inheritDoc}.
*/
@Override
public void create(final ConnectorRequestContext requestContext, final DatabaseInfo databaseInfo) {
final QualifiedName databaseName = databaseInfo.getName();
try {
this.metacatHiveClient.createDatabase(hiveMetacatConverters.fromDatabaseInfo(databaseInfo));
} catch (AlreadyExistsException exception) {
throw new DatabaseAlreadyExistsException(databaseName, exception);
} catch (MetaException | InvalidObjectException exception) {
throw new InvalidMetaException(databaseName, exception);
} catch (TException exception) {
throw new ConnectorException(
String.format("Failed creating hive database %s", databaseName), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
this.metacatHiveClient.dropDatabase(name.getDatabaseName());
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (InvalidOperationException exception) {
throw new MetacatNotSupportedException(exception.getMessage());
} catch (TException exception) {
throw new ConnectorException(String.format("Failed delete hive database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void update(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
final QualifiedName databaseName = databaseInfo.getName();
try {
this.metacatHiveClient.alterDatabase(databaseName.getDatabaseName(),
hiveMetacatConverters.fromDatabaseInfo(databaseInfo));
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(databaseName, exception);
} catch (MetaException | InvalidObjectException exception) {
throw new InvalidMetaException(databaseName, exception);
} catch (TException exception) {
throw new ConnectorException(
String.format("Failed updating hive database %s", databaseName), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public DatabaseInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final Database database = metacatHiveClient.getDatabase(name.getDatabaseName());
if (database != null) {
return hiveMetacatConverters.toDatabaseInfo(name, database);
} else {
throw new DatabaseNotFoundException(name);
}
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed get hive database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
boolean result;
try {
result = metacatHiveClient.getDatabase(name.getDatabaseName()) != null;
} catch (NoSuchObjectException exception) {
result = false;
} catch (TException exception) {
throw new ConnectorException(String.format("Failed to check hive database %s exists", name), exception);
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String databaseFilter = (prefix != null) ? prefix.getDatabaseName() : null;
for (String databaseName : metacatHiveClient.getAllDatabases()) {
final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
if (databaseFilter != null && !databaseName.startsWith(databaseFilter)) {
continue;
}
qualifiedNames.add(qualifiedName);
}
//supporting sort by qualified name only
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed listName hive database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<DatabaseInfo> list(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
for (String databaseName : metacatHiveClient.getAllDatabases()) {
final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
}
//supporting sort by name only
if (sort != null) {
ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
}
return ConnectorUtils.paginate(databaseInfos, pageable);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed list hive database %s", name), exception);
}
}
}
| 9,516 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive connector.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,517 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/commonview/CommonViewHandler.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.commonview;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.spectator.api.Registry;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import java.util.concurrent.ExecutionException;
/**
* CommonViewHandler class.
*
* @author zhenl
*/
//TODO: if a third iceberg-table-like object type is added, refactor these into a common iceberg-like handler
@CacheConfig(cacheNames = "metacat")
public class CommonViewHandler {
private static final Retryer<Void> RETRY_ICEBERG_TABLE_UPDATE = RetryerBuilder.<Void>newBuilder()
.retryIfExceptionOfType(TablePreconditionFailedException.class)
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
protected final ConnectorContext connectorContext;
protected final Registry registry;
/**
     * Constructor.
*
* @param connectorContext connector context
*/
public CommonViewHandler(final ConnectorContext connectorContext) {
this.connectorContext = connectorContext;
this.registry = connectorContext.getRegistry();
}
/**
* get CommonView Table info.
*
* @param name Common view name
* @param tableLoc table location
* @param tableInfo table info
* @param hiveTypeConverter hive type converter
* @return table info
*/
public TableInfo getCommonViewTableInfo(final QualifiedName name,
final String tableLoc,
final TableInfo tableInfo,
final HiveTypeConverter hiveTypeConverter) {
return TableInfo.builder().name(name).auditInfo(tableInfo.getAudit())
.fields(tableInfo.getFields()).serde(tableInfo.getSerde())
.metadata(tableInfo.getMetadata()).build();
}
/**
* Update common view column comments if the provided tableInfo has updated field comments.
*
* @param tableInfo table information
* @return true if an update is done
*/
public boolean update(final TableInfo tableInfo) {
return false;
}
/**
     * Handle a common view update request using the iceberg table update strategy,
     * since common views are backed by the iceberg library.
*
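     * <p>If the underlying metadata changed concurrently, {@code directSqlTable.updateIcebergTable}
     * throws a {@link TablePreconditionFailedException}; this handler then records the latest metadata
     * location from the exception, re-applies the view update and retries (up to three attempts in
     * total). A minimal illustrative call (all arguments are placeholders):
     * <pre>{@code
     * commonViewHandler.handleUpdate(requestContext, directSqlTable, tableInfo, metadataLocation);
     * }</pre>
     *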
* @param requestContext request context
* @param directSqlTable direct sql table object
* @param tableInfo table info
* @param tableMetadataLocation the common view table metadata location.
*/
@CacheEvict(key = "'iceberg.view.' + #tableMetadataLocation", beforeInvocation = true)
public void handleUpdate(final ConnectorRequestContext requestContext,
final DirectSqlTable directSqlTable,
final TableInfo tableInfo,
final String tableMetadataLocation) {
requestContext.setIgnoreErrorsAfterUpdate(true);
final boolean viewUpdated = this.update(tableInfo);
if (viewUpdated) {
try {
RETRY_ICEBERG_TABLE_UPDATE.call(() -> {
try {
directSqlTable.updateIcebergTable(tableInfo);
} catch (TablePreconditionFailedException e) {
tableInfo.getMetadata()
.put(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION, e.getMetadataLocation());
this.update(tableInfo);
throw e;
}
return null;
});
} catch (RetryException e) {
Throwables.propagate(e.getLastFailedAttempt().getExceptionCause());
} catch (ExecutionException e) {
Throwables.propagate(e.getCause());
}
} else {
directSqlTable.updateIcebergTable(tableInfo);
}
}
}
| 9,518 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/commonview/package-info.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Common view.
*
* @author zhenl
* @since 1.3.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.commonview;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,519 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveConfigConstants.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
/**
* HiveConfigConstants.
*
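 * <p>These keys are typically applied to a Hadoop {@code Configuration} (or the connector's
 * configuration map) when wiring up the metastore client. A minimal illustrative sketch; the values
 * shown are examples only, not defaults:
 * <pre>{@code
 * Configuration conf = new Configuration();
 * conf.set(HiveConfigConstants.THRIFT_URI, "thrift://localhost:9083");
 * conf.set(HiveConfigConstants.USE_EMBEDDED_METASTORE, "false");
 * conf.set(HiveConfigConstants.ALLOW_RENAME_TABLE, "true");
 * }</pre>
 *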
* @author zhenl
* @since 1.0.0
*/
public final class HiveConfigConstants {
/**
* HIVE_METASTORE_TIMEOUT.
*/
public static final String HIVE_METASTORE_TIMEOUT = "hive.metastore-timeout";
/**
* hive thrift port.
*/
public static final String THRIFT_URI = "hive.metastore.uris";
/**
* USE_EMBEDDED_METASTORE.
*/
public static final String USE_EMBEDDED_METASTORE = "hive.use.embedded.metastore";
/**
* ALLOW_RENAME_TABLE.
*/
public static final String ALLOW_RENAME_TABLE = "hive.allow-rename-table";
/**
     * USE_FASTHIVE_SERVICE.
*/
public static final String USE_FASTHIVE_SERVICE = "hive.use.embedded.fastservice";
/**
* ENABLE_AUDIT_PROCESSING.
*/
public static final String ENABLE_AUDIT_PROCESSING = "hive.use.embedded.fastservice.auditEnabled";
/**
* GET_PARTITION_DETAILS_TIMEOUT.
*/
public static final String GET_PARTITION_DETAILS_TIMEOUT = "hive.use.embedded.GetPartitionDetailsTimeout";
/**
* GET_ICEBERG_PARTITIONS_TIMEOUT.
*/
public static final String GET_ICEBERG_PARTITIONS_TIMEOUT = "hive.iceberg.GetIcebergPartitionsTimeout";
/**
* USE_FAST_DELETION.
*/
public static final String USE_FAST_DELETION = "hive.use.embedded.sql.delete.partitions";
/**
     * THREAD_POOL_SIZE.
*/
public static final String THREAD_POOL_SIZE = "hive.thread.pool.size";
/**
* USE_METASTORE_LOCAL.
*/
public static final String USE_METASTORE_LOCAL = "hive.metastore.local";
/**
* JAVAX_JDO_OPTION_NAME.
*/
public static final String JAVAX_JDO_OPTION_NAME = "javax.jdo.option.name";
/**
* JAVAX_JDO_DATASTORETIMEOUT.
*/
public static final String JAVAX_JDO_DATASTORETIMEOUT = "javax.jdo.option.DatastoreTimeout";
/**
* JAVAX_JDO_DATASTOREREADTIMEOUT.
*/
public static final String JAVAX_JDO_DATASTOREREADTIMEOUT = "javax.jdo.option.DatastoreReadTimeoutMillis";
/**
* JAVAX_JDO_DATASTOREWRITETIMEOUT.
*/
public static final String JAVAX_JDO_DATASTOREWRITETIMEOUT = "javax.jdo.option.DatastoreWriteTimeoutMillis";
/**
* JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS.
*/
public static final String JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS = "javax.jdo.PersistenceManagerFactoryClass";
/**
* JAVAX_JDO_PERSISTENCEMANAGER_FACTORY.
*/
public static final String JAVAX_JDO_PERSISTENCEMANAGER_FACTORY
= "com.netflix.metacat.connector.hive.client.embedded.HivePersistenceManagerFactory";
/**
* HIVE_METASTORE_DS_RETRY.
*/
public static final String HIVE_METASTORE_DS_RETRY = "hive.metastore.ds.retry.attempts";
/**
* HIVE_HMSHANDLER_RETRY.
*/
public static final String HIVE_HMSHANDLER_RETRY = "hive.hmshandler.retry.attempts";
/**
* HIVE_STATS_AUTOGATHER.
*/
public static final String HIVE_STATS_AUTOGATHER = "hive.stats.autogather";
/**
* DATANUCLEUS_AUTOSTARTMECHANISMMODE.
*/
public static final String DATANUCLEUS_AUTOSTARTMECHANISMMODE = "datanucleus.autoStartMechanismMode";
/**
* DATANUCLEUS_DETACHALLONCOMMIT.
*/
public static final String DATANUCLEUS_DETACHALLONCOMMIT = "datanucleus.detachAllOnCommit";
/**
* DATANUCLEUS_DETACHALLONROLLBACK.
*/
public static final String DATANUCLEUS_DETACHALLONROLLBACK = "datanucleus.detachAllOnRollback";
/**
* DATANUCLEUS_PERSISTENCYBYREACHATCOMMIT.
*/
public static final String DATANUCLEUS_PERSISTENCYBYREACHATCOMMIT = "datanucleus.persistenceByReachabilityAtCommit";
/**
* DATANUCLEUS_CACHE_LEVEL2_TYPE.
*/
public static final String DATANUCLEUS_CACHE_LEVEL2_TYPE = "datanucleus.cache.level2.type";
/**
* DATANUCLEUS_CACHE_LEVEL2.
*/
public static final String DATANUCLEUS_CACHE_LEVEL2 = "datanucleus.cache.level2";
/**
* DATANUCLEUS_VALIDATECOLUMNS.
*/
public static final String DATANUCLEUS_VALIDATECOLUMNS = "datanucleus.validateColumns";
/**
* DATANUCLEUS_VALIDATECONSTRAINTS.
*/
public static final String DATANUCLEUS_VALIDATECONSTRAINTS = "datanucleus.validateConstraints";
/**
* DATANUCLEUS_VALIDATETABLE.
*/
public static final String DATANUCLEUS_VALIDATETABLE = "datanucleus.validateTables";
/**
* DATANUCLEUS_TRANSACTIONISOLATION.
*/
public static final String DATANUCLEUS_TRANSACTIONISOLATION = "datanucleus.transactionIsolation";
/**
* DATANUCLEUS_READCOMMITTED.
*/
public static final String DATANUCLEUS_READCOMMITTED = "read-committed";
/**
* DATANUCLEUS_FIXEDDATASTORE.
*/
public static final String DATANUCLEUS_FIXEDDATASTORE = "datanucleus.fixedDatastore";
/**
* DATANUCLEUS_AUTOCREATESCHEMA.
*/
public static final String DATANUCLEUS_AUTOCREATESCHEMA = "datanucleus.autoCreateSchema";
/**
* DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS.
*/
public static final String DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS = "datanucleus.rdbms.CheckExistTablesOrViews";
/**
* DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO.
*/
public static final String DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO = "datanucleus.rdbms.initializeColumnInfo";
/**
* DATANUCLEUS_IDENTIFIERFACTORY.
*/
public static final String DATANUCLEUS_IDENTIFIERFACTORY = "datanucleus.identifierFactory";
/**
* DATANUCLEUS_DATANUCLEU1.
*/
public static final String DATANUCLEUS_DATANUCLEU1 = "datanucleus1";
/**
* DATANUCLEUS_CONNECTIONFACTORY.
*/
public static final String DATANUCLEUS_CONNECTIONFACTORY = "datanucleus.ConnectionFactory";
/**
* DATANUCLEUS_RDBMS_USELEGACYNATIVEVALUESTRATEGY.
*/
public static final String DATANUCLEUS_RDBMS_USELEGACYNATIVEVALUESTRATEGY
= "datanucleus.rdbms.useLegacyNativeValueStrategy";
/**
* HIVE_HMSHANDLER_NAME.
*/
public static final String HIVE_HMSHANDLER_NAME = "metacat";
/**
* METACAT_JDO_TIMEOUT.
*/
public static final String METACAT_JDO_TIMEOUT = "metacat.jdo.timeout";
/**
* Configuration to convert a table to external on rename table.
*/
public static final String ON_RENAME_CONVERT_TO_EXTERNAL = "metacat.on-rename-convert-to-external";
private HiveConfigConstants() {
}
}
| 9,520 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/PartitionUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.base.Strings;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
/**
* Utility class for partitions.
* @author amajumdar
*/
public final class PartitionUtil {
private PartitionUtil() {
}
/**
     * Retrieves the partition values from the partition name. This method also validates the partition keys
     * against those of the table.
*
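     * <p>A minimal illustrative example (the table and partition name are hypothetical):
     * <pre>{@code
     * // Table partitioned by (dateint, hour).
     * List<String> values = PartitionUtil.getPartValuesFromPartName(
     *     tableQName, table, "dateint=20230101/hour=0");
     * // values -> ["20230101", "0"]; a missing key would raise InvalidMetaException
     * }</pre>
     *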
* @param tableQName table name
* @param table table
* @param partName partition name
* @return list of partition values
*/
public static List<String> getPartValuesFromPartName(final QualifiedName tableQName, final Table table,
final String partName) {
if (Strings.isNullOrEmpty(partName)) {
throw new InvalidMetaException(tableQName, partName, null);
}
final LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
Warehouse.makeSpecFromName(partSpec, new Path(partName));
final List<String> values = new ArrayList<>();
for (FieldSchema field : table.getPartitionKeys()) {
final String key = field.getName();
final String val = partSpec.get(key);
if (val == null) {
throw new InvalidMetaException(tableQName, partName, null);
}
values.add(val);
}
return values;
}
/**
* Escape partition name.
*
* @param partName partition name
* @return Escaped partition name
*/
public static String escapePartitionName(final String partName) {
final LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
Warehouse.makeSpecFromName(partSpec, new Path(partName));
return FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()));
}
/**
* Generate partition name from the <code>partValues</code>.
*
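     * <p>A minimal illustrative example (the partition keys are hypothetical):
     * <pre>{@code
     * // partitionKeys describe (dateint, hour); the values are positional.
     * String name = PartitionUtil.makePartName(partitionKeys, Arrays.asList("20230101", "0"));
     * // name -> "dateint=20230101/hour=0"
     * }</pre>
     *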
* @param partitionKeys list of partition keys
* @param partValues list of partition values
* @return partition name
*/
public static String makePartName(final List<FieldSchema> partitionKeys, final List<String> partValues) {
try {
return Warehouse.makePartName(partitionKeys, partValues);
} catch (MetaException e) {
throw new InvalidMetaException("Failed making the part name from the partition values", e);
}
}
}
| 9,521 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveConnectorFastServiceMetric.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
/**
* Hive Connector Fast Service Metric.
*
* @author zhenl
* @since 1.1.0
*/
@Getter
@Slf4j
public class HiveConnectorFastServiceMetric {
private final HashMap<String, Timer> timerMap = new HashMap<>();
/**
* Constructor.
*
* @param registry the spectator registry
*/
public HiveConnectorFastServiceMetric(final Registry registry) {
timerMap.put(HiveMetrics.TagGetPartitionCount.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitionCount.getMetricName()));
timerMap.put(HiveMetrics.TagGetPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagGetPartitionKeys.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitionKeys.getMetricName()));
timerMap.put(HiveMetrics.TagGetPartitionNames.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitionNames.getMetricName()));
timerMap.put(HiveMetrics.TagTableExists.getMetricName(), createTimer(registry,
HiveMetrics.TagTableExists.getMetricName()));
timerMap.put(HiveMetrics.TagGetTableNames.getMetricName(), createTimer(registry,
HiveMetrics.TagGetTableNames.getMetricName()));
timerMap.put(HiveMetrics.TagAddPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagAddPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagAlterPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagAlterPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagDropHivePartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagDropHivePartitions.getMetricName()));
timerMap.put(HiveMetrics.TagAddDropPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagAddDropPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagAlterDatabase.getMetricName(), createTimer(registry,
HiveMetrics.TagAlterDatabase.getMetricName()));
}
private Timer createTimer(final Registry registry, final String requestTag) {
final HashMap<String, String> tags = new HashMap<>();
tags.put("request", requestTag);
return registry.timer(registry.createId(HiveMetrics.TimerFastHiveRequest.getMetricName()).withTags(tags));
}
/**
* record the duration to timer.
*
* @param metricName metric name.
* @param duration duration of the operation.
*/
public void recordTimer(final String metricName, final long duration) {
if (this.timerMap.containsKey(metricName)) {
log.debug("### Time taken to complete {} is {} ms", metricName, duration);
this.timerMap.get(metricName).record(duration, TimeUnit.MILLISECONDS);
} else {
log.error("Not supported metric {}", metricName);
}
}
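    // Editor's illustrative sketch (not part of the original source): callers time the operation
    // themselves and report the elapsed milliseconds under the matching metric name:
    //
    //     final long start = System.currentTimeMillis();
    //     // ... invoke the fast hive request ...
    //     metric.recordTimer(HiveMetrics.TagGetPartitions.getMetricName(),
    //         System.currentTimeMillis() - start);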
}
| 9,522 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveTableUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.iceberg.catalog.TableIdentifier;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* HiveTableUtil.
*
* @author zhenl
* @since 1.0.0
*/
@SuppressWarnings("deprecation")
@Slf4j
public final class HiveTableUtil {
private static final String PARQUET_HIVE_SERDE = "parquet.hive.serde.ParquetHiveSerDe";
private HiveTableUtil() {
}
/**
* getTableStructFields.
*
* @param table table
* @return all struct field refs
*/
public static List<? extends StructField> getTableStructFields(final Table table) {
final Properties schema = MetaStoreUtils.getTableMetadata(table);
final String name = schema.getProperty(serdeConstants.SERIALIZATION_LIB);
if (name == null) {
return Collections.emptyList();
}
final Deserializer deserializer = createDeserializer(getDeserializerClass(name));
try {
deserializer.initialize(new Configuration(false), schema);
} catch (SerDeException e) {
throw new RuntimeException("error initializing deserializer: " + deserializer.getClass().getName());
}
try {
final ObjectInspector inspector = deserializer.getObjectInspector();
Preconditions.checkArgument(inspector.getCategory() == ObjectInspector.Category.STRUCT,
"expected STRUCT: %s", inspector.getCategory());
return ((StructObjectInspector) inspector).getAllStructFieldRefs();
} catch (SerDeException e) {
throw Throwables.propagate(e);
}
}
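    // Editor's illustrative sketch (not part of the original source): the returned struct fields expose
    // the column names and types declared by the table's serde, e.g.
    //
    //     for (final StructField field : HiveTableUtil.getTableStructFields(table)) {
    //         log.info("{} : {}", field.getFieldName(), field.getFieldObjectInspector().getTypeName());
    //     }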
private static Class<? extends Deserializer> getDeserializerClass(final String name) {
// CDH uses different names for Parquet
if (PARQUET_HIVE_SERDE.equals(name)) {
return ParquetHiveSerDe.class;
}
try {
return Class.forName(name, true, JavaUtils.getClassLoader()).asSubclass(Deserializer.class);
} catch (ClassNotFoundException e) {
throw new RuntimeException("deserializer does not exist: " + name);
} catch (ClassCastException e) {
throw new RuntimeException("invalid deserializer class: " + name);
}
}
private static Deserializer createDeserializer(final Class<? extends Deserializer> clazz) {
try {
return clazz.getConstructor().newInstance();
} catch (ReflectiveOperationException e) {
throw new RuntimeException("error creating deserializer: " + clazz.getName(), e);
}
}
/**
* check if the table is an Iceberg Table.
*
* @param tableInfo table info
* @return true for iceberg table
*/
public static boolean isIcebergTable(final TableInfo tableInfo) {
final String tableTypeVal = getTableType(tableInfo);
return DirectSqlTable.ICEBERG_TABLE_TYPE.equalsIgnoreCase(tableTypeVal);
}
private static String getTableType(final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
final String fallbackTableType = "unknown";
final MetacatRequestContext context = MetacatContextManager.getContext();
final Map<String, String> metadata = tableInfo.getMetadata();
if (metadata == null) {
context.updateTableTypeMap(tableName, fallbackTableType);
return null;
}
String tableType = metadata.get(DirectSqlTable.PARAM_TABLE_TYPE);
if (StringUtils.isBlank(tableType)) {
tableType = fallbackTableType;
}
context.updateTableTypeMap(tableName, tableType);
return tableType;
}
/**
* get iceberg table metadata location.
*
* @param tableInfo table info
* @return iceberg table metadata location
*/
public static String getIcebergTableMetadataLocation(final TableInfo tableInfo) {
return tableInfo.getMetadata().get(DirectSqlTable.PARAM_METADATA_LOCATION);
}
/**
* Convert qualified name to table identifier.
*
* @param name qualified name
* @return table identifier
*/
public static TableIdentifier qualifiedNameToTableIdentifier(final QualifiedName name) {
return TableIdentifier.parse(name.toString().replace('/', '.'));
}
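    // Editor's illustrative sketch (not part of the original source): a qualified name rendered as
    // "prodhive/default/my_table" becomes the Iceberg identifier "prodhive.default.my_table":
    //
    //     qualifiedNameToTableIdentifier(QualifiedName.ofTable("prodhive", "default", "my_table"));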
    /**
     * Check if the table is a common view.
*
* @param tableInfo table info
* @return true for common view
*/
public static boolean isCommonView(final TableInfo tableInfo) {
return tableInfo != null && tableInfo.getMetadata() != null
&& MetacatUtils.isCommonView(tableInfo.getMetadata());
}
/**
* get common view metadata location.
*
* @param tableInfo table info
* @return common view metadata location
*/
public static String getCommonViewMetadataLocation(final TableInfo tableInfo) {
return tableInfo.getMetadata().get(DirectSqlTable.PARAM_METADATA_LOCATION);
}
/**
* Throws an invalid meta exception
* if the metadata for a table is null or empty.
*
* @param tableName the table name.
* @param metadata the table metadata.
*/
public static void throwIfTableMetadataNullOrEmpty(final QualifiedName tableName,
final Map<String, String> metadata) {
if (metadata == null || metadata.isEmpty()) {
final String message = String.format("No parameters defined for iceberg table %s", tableName);
log.warn(message);
throw new InvalidMetaException(tableName, message, null);
}
}
}
| 9,523 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HivePartitionKeyParserEval.java
|
package com.netflix.metacat.connector.hive.util;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import org.apache.hadoop.hive.common.FileUtils;
/**
* Hive partition key evaluation.
*/
public class HivePartitionKeyParserEval extends PartitionKeyParserEval {
@Override
protected String toValue(final Object value) {
return value == null ? PartitionUtil.DEFAULT_PARTITION_NAME
: FileUtils.escapePathName(value.toString());
}
}
| 9,524 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/IcebergFilterGenerator.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.types.Types;
import com.netflix.metacat.common.server.partition.parser.ASTAND;
import com.netflix.metacat.common.server.partition.parser.ASTBETWEEN;
import com.netflix.metacat.common.server.partition.parser.ASTCOMPARE;
import com.netflix.metacat.common.server.partition.parser.ASTIN;
import com.netflix.metacat.common.server.partition.parser.ASTLIKE;
import com.netflix.metacat.common.server.partition.parser.ASTMATCHES;
import com.netflix.metacat.common.server.partition.parser.ASTNOT;
import com.netflix.metacat.common.server.partition.parser.ASTOR;
import com.netflix.metacat.common.server.partition.parser.ASTVAR;
import com.netflix.metacat.common.server.partition.parser.SimpleNode;
import com.netflix.metacat.common.server.partition.parser.Variable;
import com.netflix.metacat.common.server.partition.visitor.PartitionParserEval;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Iceberg Filter generator.
*/
public class IcebergFilterGenerator extends PartitionParserEval {
private static final Set<String> ICEBERG_TIMESTAMP_NAMES
= ImmutableSet.of("dateCreated", "lastUpdated");
private final Map<String, Types.NestedField> fieldMap;
/**
* Constructor.
*
* @param fields partition fields
*/
public IcebergFilterGenerator(final List<Types.NestedField> fields) {
fieldMap = Maps.newHashMap();
for (final Types.NestedField field : fields) {
fieldMap.put(field.name(), field);
}
}
@Override
public Object visit(final ASTAND node, final Object data) {
return Expressions.and((Expression) node.jjtGetChild(0).jjtAccept(this, data),
(Expression) node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTOR node, final Object data) {
return Expressions.or((Expression) node.jjtGetChild(0).jjtAccept(this, data),
(Expression) node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTCOMPARE node, final Object data) {
if (node.jjtGetNumChildren() == 1) {
return evalSingleTerm(node, data).toString();
} else {
return evalString(node, data);
}
}
@Override
public Object visit(final ASTVAR node, final Object data) {
return ((Variable) node.jjtGetValue()).getName();
}
@Override
public Object visit(final ASTBETWEEN node, final Object data) {
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
final Object startValue = node.jjtGetChild(1).jjtAccept(this, data);
final Object endValue = node.jjtGetChild(2).jjtAccept(this, data);
final Expression compare1 =
createIcebergExpression(value, startValue, node.not ? Compare.LT : Compare.GTE);
final Expression compare2 =
createIcebergExpression(value, endValue, node.not ? Compare.GT : Compare.LTE);
return (node.not)
? Expressions.or(compare1, compare2) : Expressions.and(compare1, compare2);
}
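    // Editor's illustrative note (not part of the original source): assuming "dateint" is a LONG
    // partition field, the filter "dateint between 20240101 and 20240131" is rewritten above into
    //
    //     Expressions.and(
    //         Expressions.greaterThanOrEqual("dateint", 20240101L),
    //         Expressions.lessThanOrEqual("dateint", 20240131L))
    //
    // while the negated form produces the corresponding OR of LT/GT expressions.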
@Override
public Object visit(final ASTIN node, final Object data) {
throw new UnsupportedOperationException("IN Operator not supported");
}
@Override
public Object visit(final ASTMATCHES node, final Object data) {
throw new UnsupportedOperationException("Operator Not supported");
}
@Override
public Object visit(final ASTNOT node, final Object data) {
throw new UnsupportedOperationException("Operator Not supported");
}
@Override
public Object visit(final ASTLIKE node, final Object data) {
throw new UnsupportedOperationException("Not supported");
}
private Expression evalSingleTerm(final ASTCOMPARE node, final Object data) {
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
if (value != null) {
return Boolean.parseBoolean(value.toString())
? Expressions.alwaysTrue() : Expressions.alwaysFalse();
}
return Expressions.alwaysFalse();
}
/**
* evalString.
*
* @param node node
* @param data data
* @return eval String
*/
private Expression evalString(final SimpleNode node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data);
final Object rhs = node.jjtGetChild(2).jjtAccept(this, data);
return createIcebergExpression(lhs, rhs, comparison);
}
/**
* Check if the key is part of field.
*
* @param key input string
* @return True if key is a field.
*/
private boolean isField(final Object key) {
return (key instanceof String) && fieldMap.containsKey(((String) key).toLowerCase());
}
/**
* Check if the key is an iceberg supported date filter field.
*
* @param key input string
* @return True if key is an iceberg supported date filter field.
*/
private boolean isIcebergTimestamp(final Object key) {
return (key instanceof String) && ICEBERG_TIMESTAMP_NAMES.contains(key);
}
/**
* Get the key and value field of iceberg expression.
*
* @param lhs left hand string
* @param rhs right hand string
* @return key value pair for iceberg expression.
*/
private Pair<String, Object> getExpressionKeyValue(final Object lhs,
final Object rhs) {
if (isIcebergTimestamp(lhs)) {
return new ImmutablePair<>(lhs.toString(), ((BigDecimal) rhs).longValue());
} else if (isIcebergTimestamp(rhs)) {
return new ImmutablePair<>(rhs.toString(), ((BigDecimal) lhs).longValue());
}
if (isField(lhs)) {
return new ImmutablePair<>(lhs.toString(), getValue(lhs.toString(), rhs));
} else if (isField(rhs)) {
return new ImmutablePair<>(rhs.toString(), getValue(rhs.toString(), lhs));
}
        throw new IllegalArgumentException(
            String.format("Invalid input \"%s/%s\": the filter key must be one of the partition fields %s"
                    + " or one of the timestamp fields %s",
                lhs, rhs, fieldMap.keySet().toString(), ICEBERG_TIMESTAMP_NAMES.toString()));
}
/**
* Transform the value type to iceberg type.
*
* @param key the input filter key
* @param value the input filter value
* @return iceberg type
*/
private Object getValue(final String key, final Object value) {
if (value instanceof BigDecimal) {
switch (fieldMap.get(key).type().typeId()) {
case LONG:
return ((BigDecimal) value).longValue();
case INTEGER:
return ((BigDecimal) value).intValue();
case DOUBLE:
return ((BigDecimal) value).doubleValue();
case FLOAT:
return ((BigDecimal) value).floatValue();
case DECIMAL:
return value;
default:
throw new IllegalArgumentException("Cannot convert the given big decimal value to an Iceberg type");
}
}
return value;
}
/**
* Based on filter create iceberg expression.
*
* @param lhs left hand string
* @param rhs right hand string
* @param comparison comparing operator
* @return iceberg expression
*/
private Expression createIcebergExpression(final Object lhs,
final Object rhs,
final Compare comparison) {
final Pair<String, Object> keyValue = getExpressionKeyValue(lhs, rhs);
final String key = keyValue.getLeft();
final Object value = keyValue.getRight();
switch (comparison) {
case EQ:
return Expressions.equal(key, value);
case LTE:
return Expressions.lessThanOrEqual(key, value);
case GTE:
return Expressions.greaterThanOrEqual(key, value);
case GT:
return Expressions.greaterThan(key, value);
case LT:
return Expressions.lessThan(key, value);
case NEQ:
return Expressions.notEqual(key, value);
default:
                throw new UnsupportedOperationException(String.format("Operator %s not supported", comparison));
}
}
}
| 9,525 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveFilterPartition.java
|
package com.netflix.metacat.connector.hive.util;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import org.apache.hadoop.hive.common.FileUtils;
import java.util.Map;
/**
* Filter partition for hive.
*
* @author amajumdar
*/
public class HiveFilterPartition extends FilterPartition {
@Override
protected void addNameValues(final String name, final Map<String, String> values) {
super.addNameValues(name, values);
values.replaceAll((key, value) -> value == null ? null : FileUtils.unescapePathName(value));
}
}
| 9,526 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/PartitionFilterGenerator.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.partition.parser.ASTAND;
import com.netflix.metacat.common.server.partition.parser.ASTBETWEEN;
import com.netflix.metacat.common.server.partition.parser.ASTCOMPARE;
import com.netflix.metacat.common.server.partition.parser.ASTIN;
import com.netflix.metacat.common.server.partition.parser.ASTLIKE;
import com.netflix.metacat.common.server.partition.parser.ASTMATCHES;
import com.netflix.metacat.common.server.partition.parser.ASTNOT;
import com.netflix.metacat.common.server.partition.parser.ASTNULL;
import com.netflix.metacat.common.server.partition.parser.ASTOR;
import com.netflix.metacat.common.server.partition.parser.ASTVAR;
import com.netflix.metacat.common.server.partition.parser.SimpleNode;
import com.netflix.metacat.common.server.partition.parser.Variable;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.partition.visitor.PartitionParserEval;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.serde.serdeConstants;
import java.math.BigDecimal;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.Map;
/**
* PartitionFilterGenerator.
*
* @author zhenl
* @since 1.0.0
*/
public class PartitionFilterGenerator extends PartitionParserEval {
private final Map<String, PartitionCol> partitionColumns;
private final List<Object> params;
private List<String> partVals;
private boolean optimized;
private final boolean escapePartitionNameOnFilter;
/**
* Constructor.
*
* @param partitionsKeys partition keys
* @param escapePartitionNameOnFilter if true, escape the partition name
*/
public PartitionFilterGenerator(final List<FieldSchema> partitionsKeys, final boolean escapePartitionNameOnFilter) {
partitionColumns = Maps.newHashMap();
this.partVals = Lists.newArrayListWithCapacity(partitionsKeys.size());
for (int index = 0; index < partitionsKeys.size(); index++) {
final FieldSchema partitionKey = partitionsKeys.get(index);
partitionColumns.put(partitionKey.getName().toLowerCase(), new PartitionCol(index, partitionKey.getType()));
this.partVals.add(null);
}
this.params = Lists.newArrayList();
this.optimized = true;
this.escapePartitionNameOnFilter = escapePartitionNameOnFilter;
}
/**
* evalString.
*
* @param node node
* @param data data
* @return eval String
*/
public String evalString(final SimpleNode node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data);
final Object rhs = node.jjtGetChild(2).jjtAccept(this, data);
return createSqlCriteria(lhs, rhs, comparison, false);
}
private String createSqlCriteria(final Object lhs, final Object rhs, final Compare comparison, final boolean not) {
String key = null;
Object value = null;
boolean isKeyLhs = true;
//
// lhs, rhs or both can be keys
//
if (lhs instanceof String && isKey((String) lhs)) {
key = lhs.toString();
value = rhs;
} else if (rhs instanceof String && isKey((String) rhs)) {
key = rhs.toString();
value = lhs;
isKeyLhs = false;
}
if (key == null || value == null) {
throw new RuntimeException("Invalid expression key/value " + lhs + "/" + rhs);
}
final PartitionCol partCol = partitionColumns.get(key.toLowerCase());
final String valueStr = value.toString();
final String operator = not ? "not " + comparison.getExpression() : comparison.getExpression();
if (partCol != null && valueStr != null && (partitionColumns.containsKey(valueStr.toLowerCase()))) {
// Key part column
partCol.occurred();
final FilterType colType = partCol.type;
optimized = false;
// Value part column
            // use the same lower-case normalization as the containsKey check above
            final PartitionCol valuePartCol = partitionColumns.get(valueStr.toLowerCase());
valuePartCol.occurred();
final FilterType valueColType = valuePartCol.type;
if (colType != valueColType) {
throw new RuntimeException(
String.format("Invalid column comparison with key as %s and"
+ " value as %s", colType, valueColType));
}
return String.format("%s %s %s", getSQLExpression(partCol), operator, getSQLExpression(valuePartCol));
} else if (partCol != null) {
partCol.occurred();
// For more optimization
if (partCol.hasOccurredOnlyOnce() && Compare.EQ.equals(comparison)) {
partVals.set(partCol.index, key + "="
+ (escapePartitionNameOnFilter ? FileUtils.escapePathName(valueStr) : valueStr));
} else {
optimized = false;
}
final FilterType colType = partCol.type;
if (colType == FilterType.Invalid) {
throw new RuntimeException("Invalid type " + colType);
}
FilterType valType = FilterType.fromClass(value);
if (valType == FilterType.Invalid) {
throw new RuntimeException("Invalid value " + value.getClass());
}
if (colType == FilterType.Date && valType == FilterType.String) {
try {
value = new java.sql.Date(
HiveMetaStore.PARTITION_DATE_FORMAT.get().parse((String) value).getTime());
valType = FilterType.Date;
} catch (ParseException pe) { // do nothing, handled below - types will mismatch
}
} else if (colType == FilterType.Timestamp && valType == FilterType.String) {
try {
value = new java.sql.Timestamp(
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse((String) value).getTime());
valType = FilterType.Timestamp;
} catch (ParseException pe) { // do nothing, handled below - types will mismatch
}
} else if (colType == FilterType.Integral && valType == FilterType.String) {
try {
value = new BigDecimal((String) value);
valType = FilterType.Integral;
} catch (NumberFormatException pe) { // do nothing, handled below - types will mismatch
}
} else if (colType == FilterType.String && valType != FilterType.String) {
value = value.toString();
valType = FilterType.String;
}
            if (colType != valType) {
                throw new RuntimeException(
                    "Invalid value type " + value.getClass() + " for partition column type " + colType);
            }
key = getSQLExpression(partCol);
params.add(value);
} else if ("batchid".equalsIgnoreCase(key)) {
return "1=1";
} else if ("dateCreated".equalsIgnoreCase(key)) {
optimized = false;
key = "p.CREATE_TIME";
params.add(value);
} else {
throw new RuntimeException("Invalid expression key " + key);
}
return isKeyLhs ? String.format("%s %s %s", key, operator, "?")
: String.format("%s %s %s", "?", operator, key);
}
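    // Editor's illustrative note (not part of the original source): for a table partitioned by
    // (dateint bigint, hour string), the expression "dateint > 20240101" is rendered against the
    // PARTITION_KEY_VALS alias produced by joinSql(), with the literal bound as a parameter:
    //
    //     "cast(pv0.part_key_val as decimal(21,0)) > ?"      params: [20240101]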
private String getSQLExpression(final PartitionCol partCol) {
String result = "pv" + partCol.index + ".part_key_val";
if (partCol.type != FilterType.String) {
if (partCol.type == FilterType.Integral) {
result = "cast(" + result + " as decimal(21,0))";
} else if (partCol.type == FilterType.Date) {
result = "cast(" + result + " as date)";
} else if (partCol.type == FilterType.Timestamp) {
result = "cast(" + result + " as timestamp)";
}
}
return result;
}
private boolean isKey(final String key) {
return partitionColumns.containsKey(
key.toLowerCase()) || "batchid".equalsIgnoreCase(key) || "dateCreated".equalsIgnoreCase(key);
}
public List<Object> getParams() {
return params;
}
/**
* joinSQL.
*
* @return joined sql
*/
public String joinSql() {
final StringBuilder result = new StringBuilder();
if (!isOptimized()) {
partitionColumns.values().forEach(partCol -> {
if (partCol.hasOccurred()) {
final String tableAlias = "pv" + partCol.index;
result.append(" join PARTITION_KEY_VALS as ").append(tableAlias)
.append(" on p.part_id=").append(tableAlias).append(".part_id and ")
.append(tableAlias).append(".integer_idx=").append(partCol.index);
}
});
}
return result.toString();
}
public boolean isOptimized() {
return optimized;
}
/**
* getOptimizedSql.
*
* @return get Optimized Sql
*/
public String getOptimizedSql() {
final StringBuilder result = new StringBuilder();
boolean likeExpression = false;
boolean emptyPartVals = true;
if (isOptimized()) {
for (int i = 0; i < partVals.size(); i++) {
final String partVal = partVals.get(i);
if (partVal == null) {
likeExpression = true;
result.append("%");
} else {
emptyPartVals = false;
result.append(partVal);
if (i + 1 != partVals.size()) {
result.append("/");
}
}
}
}
if (emptyPartVals) {
return result.toString();
} else if (likeExpression) {
params.clear();
params.add(result.toString());
return "p.part_name like ?";
} else {
params.clear();
params.add(result.toString());
return "p.part_name = ?";
}
}
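    // Editor's illustrative note (not part of the original source): when every referenced partition key
    // appears exactly once with an equality predicate, e.g. "dateint=20240101 and hour='00'", the filter
    // stays optimized and collapses to a single part_name comparison:
    //
    //     getOptimizedSql();   // -> "p.part_name = ?"    params: ["dateint=20240101/hour=00"]
    //
    // Keys left unconstrained become '%' wildcards and a "p.part_name like ?" predicate is produced instead.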
@Override
public Object visit(final ASTAND node, final Object data) {
return String.format("(%s %s %s)", node.jjtGetChild(0).jjtAccept(this, data), "and",
node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTOR node, final Object data) {
optimized = false;
return String.format("(%s %s %s)", node.jjtGetChild(0).jjtAccept(this, data), "or",
node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTCOMPARE node, final Object data) {
if (node.jjtGetNumChildren() == 1) {
return evalSingleTerm(node, data).toString();
} else {
return evalString(node, data);
}
}
private Boolean evalSingleTerm(final ASTCOMPARE node, final Object data) {
Boolean result = Boolean.FALSE;
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
if (value != null) {
result = Boolean.parseBoolean(value.toString());
}
return result;
}
@Override
public Object visit(final ASTBETWEEN node, final Object data) {
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
final Object startValue = node.jjtGetChild(1).jjtAccept(this, data);
final Object endValue = node.jjtGetChild(2).jjtAccept(this, data);
final String compare1 = createSqlCriteria(value, startValue, node.not ? Compare.LT : Compare.GTE, false);
final String compare2 = createSqlCriteria(value, endValue, node.not ? Compare.GT : Compare.LTE, false);
return String.format("(%s %s %s)", compare1, node.not ? "or" : "and", compare2);
}
@Override
public Object visit(final ASTIN node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final StringBuilder builder = new StringBuilder();
for (int i = 1; i < node.jjtGetNumChildren(); i++) {
final Object inValue = node.jjtGetChild(i).jjtAccept(this, data);
if (i != 1) {
builder.append(",");
}
if (inValue instanceof String) {
builder.append("'").append(inValue).append("'");
} else {
builder.append(inValue);
}
}
final PartitionCol partCol = partitionColumns.get(lhs.toString().toLowerCase());
if (partCol != null) {
partCol.occurred();
optimized = false;
final String operator = node.not ? "not in" : "in";
return String.format("%s %s (%s)", getSQLExpression(partCol), operator, builder.toString());
} else {
throw new RuntimeException("Invalid expression key " + lhs);
}
}
@Override
public Object visit(final ASTLIKE node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
return createSqlCriteria(lhs, rhs, Compare.LIKE, node.not);
}
@Override
public Object visit(final ASTNULL node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
return createSqlCriteria(lhs, PartitionUtil.DEFAULT_PARTITION_NAME, Compare.EQ, node.not);
}
@Override
public Object visit(final ASTVAR node, final Object data) {
return ((Variable) node.jjtGetValue()).getName();
}
@Override
public Object visit(final ASTMATCHES node, final Object data) {
throw new RuntimeException("Not supported");
}
@Override
public Object visit(final ASTNOT node, final Object data) {
throw new RuntimeException("Not supported");
}
private enum FilterType {
Integral,
String,
Date,
Timestamp,
Invalid;
static FilterType fromType(final String colTypeStr) {
if (colTypeStr.equals(serdeConstants.STRING_TYPE_NAME)) {
return FilterType.String;
} else if (colTypeStr.equals(serdeConstants.DATE_TYPE_NAME)) {
return FilterType.Date;
} else if (colTypeStr.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
return FilterType.Timestamp;
} else if (serdeConstants.IntegralTypes.contains(colTypeStr)) {
return FilterType.Integral;
}
return FilterType.Invalid;
}
public static FilterType fromClass(final Object value) {
if (value instanceof String) {
return FilterType.String;
} else if (value instanceof Number) {
return FilterType.Integral;
} else if (value instanceof java.sql.Date) {
return FilterType.Date;
} else if (value instanceof java.sql.Timestamp) {
return FilterType.Timestamp;
}
return FilterType.Invalid;
}
}
static class PartitionCol {
private int index;
private FilterType type;
private int occurrences;
PartitionCol(final int index, final String type) {
this.index = index;
this.type = FilterType.fromType(type);
}
void occurred() {
occurrences++;
}
boolean hasOccurred() {
return occurrences > 0;
}
boolean hasOccurredOnlyOnce() {
return occurrences == 1;
}
}
}
| 9,527 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* com.netflix.metacat.connector.hive.util.
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.util;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,528 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/HiveConnectorInfoConverter.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.converters;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.connectors.model.ViewInfo;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableWrapper;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import java.time.Instant;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Hive connector info converter.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorInfoConverter implements ConnectorInfoConverter<Database, Table, Partition> {
private static final Splitter SLASH_SPLITTER = Splitter.on('/');
private static final Splitter EQUAL_SPLITTER = Splitter.on('=').limit(2);
private HiveTypeConverter hiveTypeConverter = new HiveTypeConverter();
/**
* Constructor.
*
* @param hiveTypeConverter typeconverter
*/
public HiveConnectorInfoConverter(final HiveTypeConverter hiveTypeConverter) {
this.hiveTypeConverter = hiveTypeConverter;
}
/**
* Converts epoch time to Date.
*
* @param seconds time in seconds
* @return Date
*/
public static Date epochSecondsToDate(final long seconds) {
return Date.from(Instant.ofEpochSecond(seconds));
}
/**
* Converts to DatabaseDto.
*
* @param database connector database
* @return Metacat database Info
*/
@Override
public DatabaseInfo toDatabaseInfo(
final QualifiedName qualifiedName,
final Database database
) {
return DatabaseInfo.builder()
.name(qualifiedName)
.uri(database.getLocationUri())
.metadata(database.getParameters())
.build();
}
/**
* Converts from DatabaseDto to the connector database.
*
* @param databaseInfo Metacat database Info
* @return connector database
*/
@Override
public Database fromDatabaseInfo(final DatabaseInfo databaseInfo) {
final QualifiedName databaseName = databaseInfo.getName();
final String name = (databaseName == null) ? "" : databaseName.getDatabaseName();
//this is a temp hack to resolve the uri = null issue
// final String dbUri = Strings.isNullOrEmpty(databaseInfo.getUri()) ? "file://temp/" : databaseInfo.getUri();
        final Map<String, String> metadata
            = (databaseInfo.getMetadata() != null) ? databaseInfo.getMetadata() : Collections.emptyMap();
return new Database(name, name, databaseInfo.getUri(), metadata);
}
/**
* Converts to TableDto.
*
* @param table connector table
* @return Metacat table Info
*/
@Override
public TableInfo toTableInfo(final QualifiedName name, final Table table) {
final List<FieldSchema> nonPartitionColumns =
(table.getSd() != null) ? table.getSd().getCols() : Collections.emptyList();
// add the data fields to the nonPartitionColumns
//ignore all exceptions
try {
if (nonPartitionColumns.isEmpty()) {
for (StructField field : HiveTableUtil.getTableStructFields(table)) {
final FieldSchema fieldSchema = new FieldSchema(field.getFieldName(),
field.getFieldObjectInspector().getTypeName(),
field.getFieldComment());
nonPartitionColumns.add(fieldSchema);
}
}
} catch (final Exception e) {
log.error(e.getMessage(), e);
}
final List<FieldSchema> partitionColumns = table.getPartitionKeys();
final Date creationDate = table.isSetCreateTime() ? epochSecondsToDate(table.getCreateTime()) : null;
final List<FieldInfo> allFields =
Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
nonPartitionColumns.stream()
.map(field -> hiveToMetacatField(field, false))
.forEachOrdered(allFields::add);
partitionColumns.stream()
.map(field -> hiveToMetacatField(field, true))
.forEachOrdered(allFields::add);
final AuditInfo auditInfo = AuditInfo.builder().createdDate(creationDate).build();
if (null != table.getTableType() && table.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
return TableInfo.builder()
.serde(toStorageInfo(table.getSd(), table.getOwner())).fields(allFields)
.metadata(table.getParameters()).name(name).auditInfo(auditInfo)
                .view(ViewInfo.builder()
                    .viewOriginalText(table.getViewOriginalText())
                    .viewExpandedText(table.getViewExpandedText())
                    .build())
                .build();
} else {
return TableInfo.builder()
.serde(toStorageInfo(table.getSd(), table.getOwner())).fields(allFields)
.metadata(table.getParameters()).name(name).auditInfo(auditInfo)
.build();
}
}
/**
* Converts IcebergTable to TableDto.
*
* @param name qualified name
* @param tableWrapper iceberg table wrapper containing the table info and extra properties
* @param tableLoc iceberg table metadata location
* @param tableInfo table info
* @return Metacat table Info
*/
public TableInfo fromIcebergTableToTableInfo(final QualifiedName name,
final IcebergTableWrapper tableWrapper,
final String tableLoc,
final TableInfo tableInfo) {
final org.apache.iceberg.Table table = tableWrapper.getTable();
final List<FieldInfo> allFields =
this.hiveTypeConverter.icebergeSchemaTofieldDtos(table.schema(), table.spec().fields());
final Map<String, String> tableParameters = new HashMap<>();
tableParameters.put(DirectSqlTable.PARAM_TABLE_TYPE, DirectSqlTable.ICEBERG_TABLE_TYPE);
tableParameters.put(DirectSqlTable.PARAM_METADATA_LOCATION, tableLoc);
tableParameters.put(DirectSqlTable.PARAM_PARTITION_SPEC, table.spec().toString());
//adding iceberg table properties
tableParameters.putAll(table.properties());
tableParameters.putAll(tableWrapper.getExtraProperties());
final StorageInfo.StorageInfoBuilder storageInfoBuilder = StorageInfo.builder();
if (tableInfo.getSerde() != null) {
// Adding the serde properties to support old engines.
storageInfoBuilder.inputFormat(tableInfo.getSerde().getInputFormat())
.outputFormat(tableInfo.getSerde().getOutputFormat())
.uri(tableInfo.getSerde().getUri())
.serializationLib(tableInfo.getSerde().getSerializationLib());
}
return TableInfo.builder().fields(allFields)
.metadata(tableParameters)
.serde(storageInfoBuilder.build())
.name(name).auditInfo(tableInfo.getAudit())
.build();
}
/**
* Converts from TableDto to the connector table.
*
* @param tableInfo Metacat table Info
* @return connector table
*/
@Override
public Table fromTableInfo(final TableInfo tableInfo) {
final QualifiedName name = tableInfo.getName();
final String tableName = (name != null) ? name.getTableName() : "";
final String databaseName = (name != null) ? name.getDatabaseName() : "";
final StorageInfo storageInfo = tableInfo.getSerde();
final String owner = (storageInfo != null && storageInfo.getOwner() != null)
? storageInfo.getOwner() : "";
final AuditInfo auditInfo = tableInfo.getAudit();
final int createTime = (auditInfo != null && auditInfo.getCreatedDate() != null)
? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
final Map<String, String> params = (tableInfo.getMetadata() != null)
? tableInfo.getMetadata() : new HashMap<>();
final List<FieldInfo> fields = tableInfo.getFields();
List<FieldSchema> partitionFields = Collections.emptyList();
List<FieldSchema> nonPartitionFields = Collections.emptyList();
if (fields != null) {
nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
partitionFields = Lists.newArrayListWithCapacity(fields.size());
for (FieldInfo fieldInfo : fields) {
if (fieldInfo.isPartitionKey()) {
partitionFields.add(metacatToHiveField(fieldInfo));
} else {
nonPartitionFields.add(metacatToHiveField(fieldInfo));
}
}
}
final StorageDescriptor sd = fromStorageInfo(storageInfo, nonPartitionFields);
final ViewInfo viewInfo = tableInfo.getView();
final String tableType = (null != viewInfo
&& !Strings.isNullOrEmpty(viewInfo.getViewOriginalText()))
? TableType.VIRTUAL_VIEW.name() : TableType.EXTERNAL_TABLE.name();
return new Table(tableName,
databaseName,
owner,
createTime,
0,
0,
sd,
partitionFields,
params,
tableType.equals(TableType.VIRTUAL_VIEW.name())
? tableInfo.getView().getViewOriginalText() : null,
tableType.equals(TableType.VIRTUAL_VIEW.name())
? tableInfo.getView().getViewExpandedText() : null,
tableType);
}
/**
* Converts to PartitionDto.
*
* @param partition connector partition
* @return Metacat partition Info
*/
@Override
public PartitionInfo toPartitionInfo(
final TableInfo tableInfo,
final Partition partition
) {
final QualifiedName tableName = tableInfo.getName();
final QualifiedName partitionName = QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(),
tableName.getTableName(),
getNameFromPartVals(tableInfo, partition.getValues()));
final String owner = notNull(tableInfo.getSerde()) ? tableInfo.getSerde().getOwner() : "";
final AuditInfo auditInfo = AuditInfo.builder()
.createdDate(epochSecondsToDate(partition.getCreateTime()))
.lastModifiedDate(epochSecondsToDate(partition.getLastAccessTime())).build();
return PartitionInfo.builder()
.serde(toStorageInfo(partition.getSd(), owner))
.name(partitionName)
.auditInfo(auditInfo)
.metadata(partition.getParameters())
.build();
}
/**
* Converts from PartitionDto to the connector partition.
*
* @param partition Metacat partition Info
* @return connector partition
*/
@Override
public Partition fromPartitionInfo(
final TableInfo tableInfo,
final PartitionInfo partition
) {
final QualifiedName name = partition.getName();
final List<String> values = Lists.newArrayListWithCapacity(16);
Map<String, String> metadata = partition.getMetadata();
if (metadata == null) {
metadata = new HashMap<>();
            // Don't use Collections.emptyMap() here: it is immutable, and the embedded
            // metastore may still add entries to the partition parameters.
}
final List<FieldInfo> fields = tableInfo.getFields();
List<FieldSchema> fieldSchemas = Collections.emptyList();
if (notNull(fields)) {
fieldSchemas = fields.stream()
.filter(field -> !field.isPartitionKey())
.map(this::metacatToHiveField)
.collect(Collectors.toList());
}
final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
//using the table level serialization lib
if (
notNull(sd.getSerdeInfo())
&& notNull(tableInfo.getSerde())
&& Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())
) {
sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
}
final AuditInfo auditInfo = partition.getAudit();
final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate()))
? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate()))
? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
if (null == name) {
return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
}
if (notNull(name.getPartitionName())) {
for (String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
Preconditions.checkState(nameValues.size() == 2,
"Unrecognized partition name: " + partition.getName());
values.add(nameValues.get(1));
}
}
final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
return new Partition(
values,
databaseName,
tableName,
createTime,
lastAccessTime,
sd,
metadata);
}
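    // Editor's illustrative note (not part of the original source): a partition named
    // "dateint=20240101/hour=00" is split on '/' and '=' above, so the resulting Hive partition carries
    // the ordered values ["20240101", "00"], falling back to the table-level serialization lib when the
    // partition serde does not define one:
    //
    //     fromPartitionInfo(tableInfo, partitionInfo).getValues();   // -> ["20240101", "00"]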
/**
* metacatToHiveField.
*
* @param fieldInfo fieldInfo
* @return FieldSchema
*/
public FieldSchema metacatToHiveField(final FieldInfo fieldInfo) {
final FieldSchema result = new FieldSchema();
result.setName(fieldInfo.getName());
if (StringUtils.isBlank(fieldInfo.getSourceType())) {
result.setType(hiveTypeConverter.fromMetacatType(fieldInfo.getType()));
} else {
result.setType(fieldInfo.getSourceType());
}
result.setComment(fieldInfo.getComment());
return result;
}
/**
* hiveToMetacatField.
*
* @param field field
* @param isPartitionKey boolean
* @return field info obj
*/
private FieldInfo hiveToMetacatField(final FieldSchema field, final boolean isPartitionKey) {
return FieldInfo.builder().name(field.getName())
.type(hiveTypeConverter.toMetacatType(field.getType()))
.sourceType(field.getType())
.comment(field.getComment())
.partitionKey(isPartitionKey)
.build();
}
private StorageInfo toStorageInfo(final StorageDescriptor sd, final String owner) {
if (sd == null) {
return new StorageInfo();
}
if (sd.getSerdeInfo() != null) {
return StorageInfo.builder().owner(owner)
.uri(sd.getLocation())
.inputFormat(sd.getInputFormat())
.outputFormat(sd.getOutputFormat())
.parameters(sd.getParameters())
.serializationLib(sd.getSerdeInfo().getSerializationLib())
.serdeInfoParameters(sd.getSerdeInfo().getParameters())
.build();
}
return StorageInfo.builder().owner(owner).uri(sd.getLocation()).inputFormat(sd.getInputFormat())
.outputFormat(sd.getOutputFormat()).parameters(sd.getParameters()).build();
}
@VisibleForTesting
Integer dateToEpochSeconds(final Date date) {
return null == date ? null : Math.toIntExact(date.toInstant().getEpochSecond());
}
private StorageDescriptor fromStorageInfo(final StorageInfo storageInfo, final List<FieldSchema> cols) {
if (storageInfo == null) {
return new StorageDescriptor(
Collections.emptyList(),
"",
null,
null,
false,
0,
new SerDeInfo("", null, new HashMap<>()),
Collections.emptyList(),
Collections.emptyList(),
new HashMap<>());
}
// Set all required fields to a non-null value
final String inputFormat = storageInfo.getInputFormat();
final String location = notNull(storageInfo.getUri()) ? storageInfo.getUri() : "";
final String outputFormat = storageInfo.getOutputFormat();
final Map<String, String> sdParams = notNull(storageInfo.getParameters())
? storageInfo.getParameters() : new HashMap<>();
final Map<String, String> serdeParams = notNull(storageInfo.getSerdeInfoParameters())
? storageInfo.getSerdeInfoParameters() : new HashMap<>();
final String serializationLib = storageInfo.getSerializationLib();
return new StorageDescriptor(
cols,
location,
inputFormat,
outputFormat,
false,
0,
new SerDeInfo("", serializationLib, serdeParams),
Collections.emptyList(),
Collections.emptyList(),
sdParams);
}
private String getNameFromPartVals(final TableInfo tableInfo, final List<String> partVals) {
final List<String> partitionKeys = getPartitionKeys(tableInfo.getFields());
if (partitionKeys.size() != partVals.size()) {
throw new IllegalArgumentException("Not the same number of partition columns and partition values");
}
final StringBuilder builder = new StringBuilder();
for (int i = 0; i < partitionKeys.size(); i++) {
if (builder.length() > 0) {
builder.append('/');
}
builder.append(partitionKeys.get(i))
.append('=')
.append(partVals.get(i));
}
return builder.toString();
}
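    // Editor's illustrative note (not part of the original source): with partition keys [dateint, hour]
    // and values ["20240101", "00"] this yields "dateint=20240101/hour=00"; unlike Warehouse.makePartName,
    // the values are not escaped here.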
private List<String> getPartitionKeys(final List<FieldInfo> fieldInfos) {
if (fieldInfos == null) {
return null;
} else if (fieldInfos.isEmpty()) {
return Collections.emptyList();
}
final List<String> keys = new LinkedList<>();
for (FieldInfo field : fieldInfos) {
if (field.isPartitionKey()) {
keys.add(field.getName());
}
}
return keys;
}
private boolean notNull(final Object object) {
return null != object;
}
}
| 9,529 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/HiveTypeMapping.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.converters;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeEnum;
import com.netflix.metacat.common.type.VarbinaryType;
import lombok.Getter;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import java.util.Map;
/**
* Hive type mapping.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveTypeMapping {
@Getter
private static final Map<Type, String> CANONICAL_TO_HIVE = ImmutableMap.<Type, String>builder()
.put(BaseType.TINYINT, serdeConstants.TINYINT_TYPE_NAME)
.put(BaseType.SMALLINT, serdeConstants.SMALLINT_TYPE_NAME)
.put(BaseType.INT, serdeConstants.INT_TYPE_NAME)
.put(BaseType.BIGINT, serdeConstants.BIGINT_TYPE_NAME)
.put(BaseType.FLOAT, serdeConstants.FLOAT_TYPE_NAME)
.put(BaseType.DOUBLE, serdeConstants.DOUBLE_TYPE_NAME)
.put(BaseType.BOOLEAN, serdeConstants.BOOLEAN_TYPE_NAME)
.put(BaseType.STRING, serdeConstants.STRING_TYPE_NAME)
.put(VarbinaryType.VARBINARY, serdeConstants.BINARY_TYPE_NAME)
.put(BaseType.DATE, serdeConstants.DATE_TYPE_NAME)
.put(BaseType.TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME)
.build();
@Getter
private static final Map<String, Type> HIVE_TO_CANONICAL = ImmutableMap.<String, Type>builder()
.put(PrimitiveObjectInspector.PrimitiveCategory.BOOLEAN.name(), BaseType.BOOLEAN)
.put(PrimitiveObjectInspector.PrimitiveCategory.BYTE.name(), BaseType.TINYINT)
.put(PrimitiveObjectInspector.PrimitiveCategory.SHORT.name(), BaseType.SMALLINT)
.put(PrimitiveObjectInspector.PrimitiveCategory.INT.name(), BaseType.INT)
.put(PrimitiveObjectInspector.PrimitiveCategory.LONG.name(), BaseType.BIGINT)
.put(PrimitiveObjectInspector.PrimitiveCategory.FLOAT.name(), BaseType.FLOAT)
.put(PrimitiveObjectInspector.PrimitiveCategory.DOUBLE.name(), BaseType.DOUBLE)
.put(PrimitiveObjectInspector.PrimitiveCategory.DATE.name(), BaseType.DATE)
.put(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP.name(), BaseType.TIMESTAMP)
.put(PrimitiveObjectInspector.PrimitiveCategory.BINARY.name(), VarbinaryType.VARBINARY)
.put(PrimitiveObjectInspector.PrimitiveCategory.VOID.name(), VarbinaryType.VARBINARY)
.put(PrimitiveObjectInspector.PrimitiveCategory.STRING.name(), BaseType.STRING)
.put(TypeEnum.DATE.getType(), BaseType.DATE)
.build();
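    // Editor's illustrative sketch (not part of the original source) of the Lombok-generated accessors:
    //
    //     HiveTypeMapping.getCANONICAL_TO_HIVE().get(BaseType.BIGINT);   // -> "bigint"
    //     HiveTypeMapping.getHIVE_TO_CANONICAL()
    //         .get(PrimitiveObjectInspector.PrimitiveCategory.LONG.name());   // -> BaseType.BIGINT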
}
| 9,530 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/package-info.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive connector converters.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.converters;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,531 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/HiveTypeConverter.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.converters;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.MapType;
import com.netflix.metacat.common.type.ParametricType;
import com.netflix.metacat.common.type.RowType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeEnum;
import com.netflix.metacat.common.type.TypeRegistry;
import com.netflix.metacat.common.type.TypeSignature;
import com.netflix.metacat.common.type.TypeUtils;
import com.netflix.metacat.common.type.VarcharType;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* Class to convert hive to canonical type and vice versa.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveTypeConverter implements ConnectorTypeConverter {
    // matches decimal declarations with precision only, ex: decimal(38)
    // matches declarations with spaces around '(', the precision and ')'
    private static final String DECIMAL_WITH_SCALE
        = "decimal\\s*\\(\\s*[0-9]+\\s*\\)";
    // matches decimal declarations with precision and scale, ex: decimal(38,9)
    // matches declarations with spaces around '(', the precision, the comma, the scale and ')'
    private static final String DECIMAL_WITH_SCALE_AND_PRECISION
        = "decimal\\s*\\(\\s*[0-9]+\\s*,\\s*[0-9]*\\s*\\)";
// combined compiled pattern to match both
private static final Pattern DECIMAL_TYPE
= Pattern.compile(DECIMAL_WITH_SCALE + "|" + DECIMAL_WITH_SCALE_AND_PRECISION, Pattern.CASE_INSENSITIVE);
private static Type getPrimitiveType(final ObjectInspector fieldInspector) {
final PrimitiveCategory primitiveCategory = ((PrimitiveObjectInspector) fieldInspector)
.getPrimitiveCategory();
if (HiveTypeMapping.getHIVE_TO_CANONICAL().containsKey(primitiveCategory.name())) {
return HiveTypeMapping.getHIVE_TO_CANONICAL().get(primitiveCategory.name());
}
switch (primitiveCategory) {
case DECIMAL:
final DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) ((PrimitiveObjectInspector) fieldInspector)
.getTypeInfo();
return DecimalType.createDecimalType(decimalTypeInfo.precision(), decimalTypeInfo.getScale());
case CHAR:
final int cLength = ((CharTypeInfo) ((PrimitiveObjectInspector)
fieldInspector).getTypeInfo()).getLength();
return CharType.createCharType(cLength);
case VARCHAR:
final int vLength = ((VarcharTypeInfo) ((PrimitiveObjectInspector) fieldInspector)
.getTypeInfo()).getLength();
return VarcharType.createVarcharType(vLength);
default:
return null;
}
}
@Override
public Type toMetacatType(final String type) {
// Hack to fix the Presto "varchar" type coming in with no length, which Hive requires.
final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(sanitizeType(type));
ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
// The standard struct object inspector forces field names to lower case. However, in Metacat we need to preserve
// the original case of the struct fields, so we wrap it with our wrapper to force the field names to keep
// their original case
if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
structTypeInfo.getAllStructFieldNames(), objectInspector);
}
return getCanonicalType(oi);
}
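// Illustrative usage sketch (hypothetical, not part of the original source); assumes the default
// constructor and standard Hive type strings:
//   HiveTypeConverter converter = new HiveTypeConverter();
//   Type rowType = converter.toMetacatType("struct<FooBar:int>"); // field name stays "FooBar"
//   Type stringType = converter.toMetacatType("varchar");         // sanitized to "string" first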
/**
* Converts an Iceberg schema to a list of field DTOs.
*
* @param schema schema
* @param partitionFields partitioned fields
* @return list of field Info
*/
public List<FieldInfo> icebergeSchemaTofieldDtos(final Schema schema,
final List<PartitionField> partitionFields) {
final List<FieldInfo> fields = Lists.newArrayList();
final List<String> partitionNames =
partitionFields.stream()
.map(f -> schema.findField(f.sourceId()).name()).collect(Collectors.toList());
for (Types.NestedField field : schema.columns()) {
final FieldInfo fieldInfo = new FieldInfo();
fieldInfo.setName(field.name());
final org.apache.iceberg.types.Type fieldType = field.type();
fieldInfo.setSourceType(fieldType.toString());
fieldInfo.setType(toMetacatType(fromIcebergToHiveType(fieldType)));
fieldInfo.setIsNullable(field.isOptional());
fieldInfo.setComment(field.doc());
fieldInfo.setPartitionKey(partitionNames.contains(field.name()));
fields.add(fieldInfo);
}
return fields;
}
/**
* convert iceberg to hive type.
* @param type iceberg type.
* @return hive type string.
*/
public static String fromIcebergToHiveType(final org.apache.iceberg.types.Type type) {
switch (type.typeId()) {
case BOOLEAN:
return serdeConstants.BOOLEAN_TYPE_NAME;
case INTEGER:
return serdeConstants.INT_TYPE_NAME;
case LONG:
return serdeConstants.BIGINT_TYPE_NAME;
case FLOAT:
return serdeConstants.FLOAT_TYPE_NAME;
case DOUBLE:
return serdeConstants.DOUBLE_TYPE_NAME;
case DATE:
return serdeConstants.DATE_TYPE_NAME;
case TIME:
throw new UnsupportedOperationException("Hive does not support time fields");
case TIMESTAMP:
return serdeConstants.TIMESTAMP_TYPE_NAME;
case STRING:
case UUID:
return serdeConstants.STRING_TYPE_NAME;
case FIXED:
return serdeConstants.BINARY_TYPE_NAME;
case BINARY:
return serdeConstants.BINARY_TYPE_NAME;
case DECIMAL:
final Types.DecimalType decimalType = (Types.DecimalType) type;
return String.format("decimal(%s,%s)", decimalType.precision(), decimalType.scale());
case STRUCT:
final Types.StructType structType = type.asStructType();
final String nameToType = structType.fields().stream()
.map(f -> String.format("%s:%s", f.name(), fromIcebergToHiveType(f.type())))
.collect(Collectors.joining(","));
return String.format("struct<%s>", nameToType);
case LIST:
final Types.ListType listType = type.asListType();
return String.format("array<%s>", fromIcebergToHiveType(listType.elementType()));
case MAP:
final Types.MapType mapType = type.asMapType();
return String.format("map<%s,%s>", fromIcebergToHiveType(mapType.keyType()),
fromIcebergToHiveType(mapType.valueType()));
default:
throw new UnsupportedOperationException(type + " is not supported");
}
}
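// Illustrative examples (hypothetical), assuming the standard Hive serdeConstants names
// ("bigint", "string", ...):
//   fromIcebergToHiveType(Types.LongType.get())        -> "bigint"
//   fromIcebergToHiveType(Types.DecimalType.of(38, 9)) -> "decimal(38,9)"
//   an Iceberg list<string>                            -> "array<string>"
//   an Iceberg struct<id:long, name:string>            -> "struct<id:bigint,name:string>"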
@Override
public String fromMetacatType(final Type type) {
if (HiveTypeMapping.getCANONICAL_TO_HIVE().containsKey(type)) {
return HiveTypeMapping.getCANONICAL_TO_HIVE().get(type);
}
if (type instanceof DecimalType || type instanceof CharType || type instanceof VarcharType) {
return type.getDisplayName();
} else if (type.getTypeSignature().getBase().equals(TypeEnum.MAP)) {
final MapType mapType = (MapType) type;
return "map<" + fromMetacatType(mapType.getKeyType())
+ "," + fromMetacatType(mapType.getValueType()) + ">";
} else if (type.getTypeSignature().getBase().equals(TypeEnum.ROW)) {
final RowType rowType = (RowType) type;
final String typeString = rowType.getFields()
.stream()
.map(this::rowFieldToString)
.collect(Collectors.joining(","));
return "struct<" + typeString + ">";
} else if (type.getTypeSignature().getBase().equals(TypeEnum.ARRAY)) {
final String typeString = ((ParametricType) type).getParameters().stream().map(this::fromMetacatType)
.collect(Collectors.joining(","));
return "array<" + typeString + ">";
}
return null;
}
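// Illustrative outputs (hypothetical), assuming the canonical-to-Hive mapping resolves base
// types to the usual Hive names:
//   a MAP of (string, bigint)          -> "map<string,bigint>"
//   a ROW with fields a:int, b:string  -> "struct<a:int,b:string>"
//   an ARRAY of double                 -> "array<double>"
//   a DecimalType(10,2)                -> "decimal(10,2)" via Type#getDisplayName()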
private String rowFieldToString(final RowType.RowField rowField) {
String prefix = "";
if (rowField.getName() != null) {
prefix = rowField.getName() + ":";
}
return prefix + fromMetacatType(rowField.getType());
}
/**
* Sanitize the type to handle Hive type conversion edge cases.
*
* @param type the type to sanitize
* @return the sanitized type
*/
public static String sanitizeType(final String type) {
if ("varchar".equalsIgnoreCase(type)) {
return serdeConstants.STRING_TYPE_NAME;
} else {
// the current version of Hive (1.2.1) cannot handle spaces in column definitions
// this was fixed in 1.3.0. See: https://issues.apache.org/jira/browse/HIVE-11476
// this bug caused an error in loading the table information in Metacat
// see: https://netflix.slack.com/archives/G0SUNC804/p1676930065306799
// Here the offending column definition was decimal(38, 9)
// which had a space between the comma and the digit 9
// instead of upgrading the Hive version, we are making a targeted "fix"
// to handle this space in a decimal column declaration
// the regex we use tries to match various decimal declarations
// and handles decimal types inside other type declarations like array and struct
// see the unit tests for this method for all the cases handled
final Matcher matcher = DECIMAL_TYPE.matcher(type);
final StringBuilder replacedType = new StringBuilder();
// keep track of the start of the substring that we haven't matched yet
// more explanation on how this is used is below
int prevStart = 0;
// we cannot simply use matcher.matches() or matcher.replaceAll() with a fixed replacement
// because we need to transform each matched decimal declaration rather than substitute a constant
// instead we use matcher.find() together with start() and end() to locate each matched substring
// and then apply the replace function to remove spaces in that decimal declaration
// we do this for all the matches in the type declaration and hence the usage of the while loop
while (matcher.find()) {
// this index represents the start index (inclusive) of our current match
final int currMatchStart = matcher.start();
// this represents the end index (exclusive) of our current match
final int currMatchEnd = matcher.end();
replacedType
// first append any part of the string that did not match
// this is represented by the prevStart (inclusive) till the start of the current match (exclusive)
// this append should not need any replacement and can be added verbatim
.append(type, prevStart, currMatchStart)
// Then append the matching part which should be a decimal declaration
// The matching part is start (inclusive) and end (exclusive)
// This part should go through a replacement to remove spaces
.append(type.substring(currMatchStart, currMatchEnd).replaceAll("\\s", ""));
// update the prevStart marker so that for the next match
// we know where to start to add the non-matching part
prevStart = currMatchEnd;
}
// append any remaining part of the input type to the final answer
// again, no replacement necessary for this part since it should not contain any decimal declarations
// phew!
replacedType.append(type.substring(prevStart));
return replacedType.toString();
}
}
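// Illustrative examples of the sanitization above (hypothetical inputs):
//   sanitizeType("varchar")                        -> "string"
//   sanitizeType("decimal(38, 9)")                 -> "decimal(38,9)"
//   sanitizeType("array<decimal( 38 , 9 )>")       -> "array<decimal(38,9)>"
//   sanitizeType("struct<a:decimal(10),b:string>") -> unchanged (already space-free)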
/**
* Returns the canonical type.
*
* @param fieldInspector inspector
* @return type
*/
Type getCanonicalType(final ObjectInspector fieldInspector) {
switch (fieldInspector.getCategory()) {
case PRIMITIVE:
return getPrimitiveType(fieldInspector);
case MAP:
final MapObjectInspector mapObjectInspector =
TypeUtils.checkType(fieldInspector, MapObjectInspector.class,
"fieldInspector");
final Type keyType = getCanonicalType(mapObjectInspector.getMapKeyObjectInspector());
final Type valueType = getCanonicalType(mapObjectInspector.getMapValueObjectInspector());
if (keyType == null || valueType == null) {
return null;
}
return TypeRegistry.getTypeRegistry().getParameterizedType(TypeEnum.MAP,
ImmutableList.of(keyType.getTypeSignature(), valueType.getTypeSignature()), ImmutableList.of());
case LIST:
final ListObjectInspector listObjectInspector =
TypeUtils.checkType(fieldInspector, ListObjectInspector.class,
"fieldInspector");
final Type elementType =
getCanonicalType(listObjectInspector.getListElementObjectInspector());
if (elementType == null) {
return null;
}
return TypeRegistry.getTypeRegistry().getParameterizedType(TypeEnum.ARRAY,
ImmutableList.of(elementType.getTypeSignature()), ImmutableList.of());
case STRUCT:
final StructObjectInspector structObjectInspector =
TypeUtils.checkType(fieldInspector, StructObjectInspector.class, "fieldInspector");
final List<TypeSignature> fieldTypes = new ArrayList<>();
final List<Object> fieldNames = new ArrayList<>();
for (StructField field : structObjectInspector.getAllStructFieldRefs()) {
fieldNames.add(field.getFieldName());
final Type fieldType = getCanonicalType(field.getFieldObjectInspector());
if (fieldType == null) {
return null;
}
fieldTypes.add(fieldType.getTypeSignature());
}
return TypeRegistry.getTypeRegistry()
.getParameterizedType(TypeEnum.ROW, fieldTypes, fieldNames);
default:
log.info("Currently unsupported type {}, returning Unknown type", fieldInspector.getTypeName());
return BaseType.UNKNOWN;
}
}
// This is protected and extends StandardStructObjectInspector so it can reference MyField
protected static class SameCaseStandardStructObjectInspector extends StandardStructObjectInspector {
private final List<String> realFieldNames;
private final StandardStructObjectInspector structObjectInspector;
public SameCaseStandardStructObjectInspector(final List<String> realFieldNames,
final StandardStructObjectInspector structObjectInspector) {
this.realFieldNames = realFieldNames;
this.structObjectInspector = structObjectInspector;
}
@Override
public List<? extends StructField> getAllStructFieldRefs() {
return structObjectInspector.getAllStructFieldRefs()
.stream()
.map(structField -> (MyField) structField)
.map(field -> new HiveTypeConverter.
SameCaseStandardStructObjectInspector.SameCaseMyField(field.getFieldID(),
realFieldNames.get(field.getFieldID()),
field.getFieldObjectInspector(), field.getFieldComment()))
.collect(Collectors.toList());
}
protected static class SameCaseMyField extends MyField {
public SameCaseMyField(final int fieldID, final String fieldName,
final ObjectInspector fieldObjectInspector,
final String fieldComment) {
super(fieldID, fieldName, fieldObjectInspector, fieldComment);
// Since super lower cases fieldName, this is to restore the original case
this.fieldName = fieldName;
}
}
}
}
| 9,532 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/DataMetadataMetricConstants.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
/**
* Data Metric Constants.
*
* @author zhenl
* @since 1.2.0
*/
public final class DataMetadataMetricConstants {
/**
* DATA_METADATA_METRIC_NAME.
*/
public static final String DATA_METADATA_METRIC_NAME = "metrics";
/**
* DATA_METADATA_VALUE.
*/
public static final String DATA_METADATA_VALUE = "value";
private DataMetadataMetricConstants() { }
}
| 9,533 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableOpsProxy.java
|
package com.netflix.metacat.connector.hive.iceberg;
import org.apache.iceberg.TableMetadata;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.Cacheable;
/**
* Proxy class to get the table metadata from the cache if it exists.
*/
@CacheConfig(cacheNames = "metacat")
public class IcebergTableOpsProxy {
/**
* Returns the table metadata from the cache if it exists. If not, makes the iceberg call to refresh it.
* @param icebergTableOps iceberg table operations
* @param useCache true, if table can be retrieved from cache
* @return TableMetadata
*/
@Cacheable(key = "'iceberg.' + #icebergTableOps.currentMetadataLocation()", condition = "#useCache")
public TableMetadata getMetadata(final IcebergTableOps icebergTableOps, final boolean useCache) {
return icebergTableOps.refresh();
}
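// Illustrative note (hypothetical values): with the annotations above, a call such as
//   proxy.getMetadata(ops, true)
// is cached in the "metacat" cache under the key
//   "iceberg." + ops.currentMetadataLocation()
// e.g. "iceberg.s3://bucket/db/table/metadata/00001-abc.metadata.json"; a cache hit skips the
// refresh, while useCache=false bypasses caching and always invokes refresh().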
}
| 9,534 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergMetastoreTables.java
|
package com.netflix.metacat.connector.hive.iceberg;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import org.apache.iceberg.BaseMetastoreCatalog;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import java.util.List;
import java.util.Map;
/**
* Implementation of BaseMetastoreCatalog to interact with the Iceberg library.
* Loads an iceberg table from a location.
*/
public final class IcebergMetastoreTables extends BaseMetastoreCatalog {
private IcebergTableOps tableOperations;
IcebergMetastoreTables(final IcebergTableOps tableOperations) {
this.tableOperations = tableOperations;
}
@Override
public List<TableIdentifier> listTables(final Namespace namespace) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public String name() {
return "";
}
@Override
public Table createTable(final TableIdentifier identifier,
final Schema schema,
final PartitionSpec spec,
final String location,
final Map<String, String> properties) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public Transaction newCreateTableTransaction(final TableIdentifier identifier,
final Schema schema,
final PartitionSpec spec,
final String location,
final Map<String, String> properties) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public Transaction newReplaceTableTransaction(final TableIdentifier identifier,
final Schema schema,
final PartitionSpec spec,
final String location,
final Map<String, String> properties,
final boolean orCreate) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public Table loadTable(final TableIdentifier identifier) {
return super.loadTable(identifier);
}
@Override
protected TableOperations newTableOps(final TableIdentifier tableIdentifier) {
return getTableOps();
}
@Override
protected String defaultWarehouseLocation(final TableIdentifier tableIdentifier) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public boolean dropTable(final TableIdentifier identifier,
final boolean purge) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public void renameTable(final TableIdentifier from,
final TableIdentifier to) {
throw new MetacatNotSupportedException("not supported");
}
/**
* Returns the IcebergTableOps instance backing this catalog.
*
* @return the IcebergTableOps for the table
*/
public IcebergTableOps getTableOps() {
return tableOperations;
}
}
| 9,535 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/DataMetadataMetrics.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import lombok.Getter;
/**
* Data Metrics.
*
* @author zhenl
* @since 1.2.0
*/
@Getter
public enum DataMetadataMetrics {
/**
* number of rows.
*/
rowCount("com.netflix.dse.mds.metric.RowCount"),
/**
* number of files.
*/
fileCount("com.netflix.dse.mds.metric.NumFiles");
private final String metricName;
DataMetadataMetrics(final String name) {
this.metricName = name;
}
@Override
public String toString() {
return metricName;
}
}
| 9,536 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableOps.java
|
package com.netflix.metacat.connector.hive.iceberg;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.server.properties.Config;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.exceptions.NotFoundException;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.io.FileIO;
/**
* Implementation of BaseMetastoreTableOperations to interact with the Iceberg library.
* Read-only operations.
*/
public class IcebergTableOps extends BaseMetastoreTableOperations {
private String location;
private String tableName;
private final Configuration conf;
private final Config config;
private final IcebergTableOpsProxy icebergTableOpsProxy;
private TableMetadata tableMetadata;
/**
* Constructor.
* @param conf hive configuration
* @param location table manifest location
* @param tableName table name
* @param config server config
* @param icebergTableOpsProxy IcebergTableOps proxy
*/
public IcebergTableOps(final Configuration conf,
final String location,
final String tableName,
final Config config,
final IcebergTableOpsProxy icebergTableOpsProxy) {
this.location = location;
this.tableName = tableName;
this.conf = conf;
this.config = config;
this.icebergTableOpsProxy = icebergTableOpsProxy;
}
@Override
protected String tableName() {
return tableName;
}
@Override
public TableMetadata current() {
if (tableMetadata == null) {
tableMetadata =
icebergTableOpsProxy.getMetadata(this, config.isIcebergTableMetadataCacheEnabled());
}
return tableMetadata;
}
@Override
public FileIO io() {
return new HadoopFileIO(conf);
}
@Override
public TableMetadata refresh() {
try {
refreshFromMetadataLocation(this.location, config.getIcebergRefreshFromMetadataLocationRetryNumber());
return super.current();
} catch (Exception e) {
for (Throwable ex : Throwables.getCausalChain(e)) {
if (ex.getMessage() != null && ex.getMessage().contains("NoSuchKey")) {
throw new NotFoundException(e, String.format("Location %s does not exist", location));
}
}
throw e;
}
}
@Override
public String currentMetadataLocation() {
return location;
}
@Override
public void commit(final TableMetadata base, final TableMetadata metadata) {
if (!base.equals(metadata)) {
location = writeNewMetadata(metadata, currentVersion() + 1);
tableMetadata = null;
this.requestRefresh();
}
}
}
| 9,537 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableCriteria.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.netflix.metacat.common.QualifiedName;
/**
* Iceberg Table Criteria.
*
* @author zhenl
* @since 1.2.0
*/
public interface IcebergTableCriteria {
/**
* Controls iceberg table operations in metacat. The criteria implementation throws an exception if
* the iceberg table doesn't satisfy the criteria, e.g. the manifest file doesn't exist or is too large.
*
* @param tableName qualified table name
* @param tableLocation table location.
*/
default void checkCriteria(final QualifiedName tableName, final String tableLocation) {
}
}
| 9,538 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableHandler.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.parser.ParseException;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.metacat.connector.hive.util.IcebergFilterGenerator;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.ScanSummary;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableMetadataParser;
import org.apache.iceberg.UpdateSchema;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.exceptions.NotFoundException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.types.Types;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.StringReader;
import java.time.Instant;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Iceberg table handler which interacts with the iceberg library
* to perform iceberg table loading, querying, etc. The operations are limited to
* read-only for now.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class IcebergTableHandler {
private static final Retryer<Void> RETRY_ICEBERG_TABLE_UPDATE = RetryerBuilder.<Void>newBuilder()
.retryIfExceptionOfType(TablePreconditionFailedException.class)
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
private final Configuration conf;
private final ConnectorContext connectorContext;
private final Registry registry;
@VisibleForTesting
private IcebergTableCriteria icebergTableCriteria;
@VisibleForTesting
private IcebergTableOpWrapper icebergTableOpWrapper;
private IcebergTableOpsProxy icebergTableOpsProxy;
/**
* Constructor.
*
* @param connectorContext connector context
* @param icebergTableCriteria iceberg table criteria
* @param icebergTableOpWrapper iceberg table operation
* @param icebergTableOpsProxy IcebergTableOps proxy
*/
public IcebergTableHandler(final ConnectorContext connectorContext,
final IcebergTableCriteria icebergTableCriteria,
final IcebergTableOpWrapper icebergTableOpWrapper,
final IcebergTableOpsProxy icebergTableOpsProxy) {
this.conf = new Configuration();
this.connectorContext = connectorContext;
this.registry = connectorContext.getRegistry();
connectorContext.getConfiguration().keySet()
.forEach(key -> conf.set(key, connectorContext.getConfiguration().get(key)));
this.icebergTableCriteria = icebergTableCriteria;
this.icebergTableOpWrapper = icebergTableOpWrapper;
this.icebergTableOpsProxy = icebergTableOpsProxy;
}
/**
* Returns the partitions for the given table and filter.
*
* @param tableInfo the table info
* @param context the request context
* @param filterExpression the filter expression
* @param partitionIds the partition ids to match
* @param sort the sort order
* @return the list of partitions
*/
public List<PartitionInfo> getPartitions(final TableInfo tableInfo,
final ConnectorContext context,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
@Nullable final Sort sort) {
final QualifiedName tableName = tableInfo.getName();
final org.apache.iceberg.Table icebergTable = getIcebergTable(tableName,
HiveTableUtil.getIcebergTableMetadataLocation(tableInfo), false).getTable();
final Map<String, ScanSummary.PartitionMetrics> partitionMap
= getIcebergTablePartitionMap(tableName, filterExpression, icebergTable);
final AuditInfo tableAuditInfo = tableInfo.getAudit();
final List<PartitionInfo> filteredPartitionList = partitionMap.keySet().stream()
.filter(partitionName -> partitionIds == null || partitionIds.contains(partitionName))
.map(partitionName ->
PartitionInfo.builder().name(
QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(),
tableName.getTableName(),
partitionName)
).serde(StorageInfo.builder().uri(
getIcebergPartitionURI(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionName,
partitionMap.get(partitionName).dataTimestampMillis(),
context
)).build()
)
.dataMetrics(getDataMetadataFromIcebergMetrics(partitionMap.get(partitionName)))
.auditInfo(
AuditInfo.builder()
.createdBy(tableAuditInfo.getCreatedBy())
.createdDate(fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
.lastModifiedDate(
fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
.build()
).build()
)
.collect(Collectors.toList());
if (sort != null) {
if (sort.hasSort() && sort.getSortBy().equalsIgnoreCase(DirectSqlGetPartition.FIELD_DATE_CREATED)) {
final Comparator<PartitionInfo> dateCreatedComparator = Comparator.comparing(
p -> p.getAudit() != null ? p.getAudit().getCreatedDate() : null,
Comparator.nullsLast(Date::compareTo));
ConnectorUtils.sort(filteredPartitionList, sort, dateCreatedComparator);
} else {
// Sort using the partition name by default
final Comparator<PartitionInfo> nameComparator = Comparator.comparing(p -> p.getName().toString());
ConnectorUtils.sort(filteredPartitionList, sort, nameComparator);
}
}
return filteredPartitionList;
}
/**
* get Partition Map.
*
* @param tableName Qualified table name
* @param filterExpression the filter
* @param icebergTable iceberg Table
* @return partition map
*/
public Map<String, ScanSummary.PartitionMetrics> getIcebergTablePartitionMap(
final QualifiedName tableName,
@Nullable final String filterExpression,
final Table icebergTable) {
final long start = this.registry.clock().wallTime();
final Map<String, ScanSummary.PartitionMetrics> result;
try {
if (!Strings.isNullOrEmpty(filterExpression)) {
final IcebergFilterGenerator icebergFilterGenerator
= new IcebergFilterGenerator(icebergTable.schema().columns());
final Expression filter = (Expression) new PartitionParser(
new StringReader(filterExpression)).filter()
.jjtAccept(icebergFilterGenerator, null);
result = this.icebergTableOpWrapper.getPartitionMetricsMap(icebergTable, filter);
} else {
result = this.icebergTableOpWrapper.getPartitionMetricsMap(icebergTable, null);
}
} catch (ParseException ex) {
log.error("Iceberg filter parse error: ", ex);
throw new IllegalArgumentException(String.format("Iceberg filter parse error. Ex: %s", ex.getMessage()));
} catch (IllegalStateException e) {
registry.counter(registry.createId(IcebergRequestMetrics.CounterGetPartitionsExceedThresholdFailure
.getMetricName()).withTags(tableName.parts())).increment();
final String message =
String.format("Number of partitions queried for table %s exceeded the threshold %d",
tableName, connectorContext.getConfig().getMaxPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
} finally {
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to getIcebergTablePartitionMap {} is {} ms", tableName, duration);
this.recordTimer(
IcebergRequestMetrics.TagGetPartitionMap.getMetricName(), duration);
this.increaseCounter(
IcebergRequestMetrics.TagGetPartitionMap.getMetricName(), tableName);
}
return result;
}
/**
* get iceberg table.
*
* @param tableName table name
* @param tableMetadataLocation table metadata location
* @param includeInfoDetails if true, will include more details like the manifest file content
* @return iceberg table
*/
public IcebergTableWrapper getIcebergTable(final QualifiedName tableName, final String tableMetadataLocation,
final boolean includeInfoDetails) {
final long start = this.registry.clock().wallTime();
try {
this.icebergTableCriteria.checkCriteria(tableName, tableMetadataLocation);
log.debug("Loading icebergTable {} from {}", tableName, tableMetadataLocation);
final IcebergMetastoreTables icebergMetastoreTables = new IcebergMetastoreTables(
new IcebergTableOps(conf, tableMetadataLocation, tableName.getTableName(),
connectorContext.getConfig(),
icebergTableOpsProxy));
final Table table = icebergMetastoreTables.loadTable(
HiveTableUtil.qualifiedNameToTableIdentifier(tableName));
final Map<String, String> extraProperties = Maps.newHashMap();
if (includeInfoDetails) {
extraProperties.put(DirectSqlTable.PARAM_METADATA_CONTENT,
TableMetadataParser.toJson(icebergMetastoreTables.getTableOps().current()));
}
return new IcebergTableWrapper(table, extraProperties);
} catch (NotFoundException | NoSuchTableException e) {
throw new InvalidMetaException(tableName, e);
} finally {
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to getIcebergTable {} is {} ms", tableName, duration);
this.recordTimer(IcebergRequestMetrics.TagLoadTable.getMetricName(), duration);
this.increaseCounter(IcebergRequestMetrics.TagLoadTable.getMetricName(), tableName);
}
}
/**
* Updates the iceberg schema if the provided tableInfo has updated field comments.
*
* @param tableInfo table information
* @return true if an update is done
*/
public boolean update(final TableInfo tableInfo) {
boolean result = false;
final List<FieldInfo> fields = tableInfo.getFields();
if (fields != null && !fields.isEmpty()
// This parameter is only sent during data change and not during schema change.
&& Strings.isNullOrEmpty(tableInfo.getMetadata().get(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION))) {
final QualifiedName tableName = tableInfo.getName();
final String tableMetadataLocation = HiveTableUtil.getIcebergTableMetadataLocation(tableInfo);
if (Strings.isNullOrEmpty(tableMetadataLocation)) {
final String message = String.format("No metadata location specified for table %s", tableName);
log.error(message);
throw new MetacatBadRequestException(message);
}
final IcebergMetastoreTables icebergMetastoreTables = new IcebergMetastoreTables(
new IcebergTableOps(conf, tableMetadataLocation, tableName.getTableName(),
connectorContext.getConfig(),
icebergTableOpsProxy));
final Table table = icebergMetastoreTables.loadTable(
HiveTableUtil.qualifiedNameToTableIdentifier(tableName));
final UpdateSchema updateSchema = table.updateSchema();
final Schema schema = table.schema();
for (FieldInfo field : fields) {
final Types.NestedField iField = schema.findField(field.getName());
if (iField != null && !Objects.equals(field.getComment(), iField.doc())) {
updateSchema.updateColumnDoc(field.getName(), field.getComment());
result = true;
}
}
if (result) {
updateSchema.commit();
final String newTableMetadataLocation = icebergMetastoreTables.getTableOps().currentMetadataLocation();
if (!tableMetadataLocation.equalsIgnoreCase(newTableMetadataLocation)) {
tableInfo.getMetadata().put(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION, tableMetadataLocation);
tableInfo.getMetadata().put(DirectSqlTable.PARAM_METADATA_LOCATION, newTableMetadataLocation);
}
}
}
return result;
}
/**
* Handle iceberg table update operation.
*
* @param requestContext request context
* @param directSqlTable direct sql table object
* @param tableInfo table info
*/
public void handleUpdate(final ConnectorRequestContext requestContext,
final DirectSqlTable directSqlTable,
final TableInfo tableInfo) {
requestContext.setIgnoreErrorsAfterUpdate(true);
this.update(tableInfo);
// TODO: only trying once for correctness for now to fix a race condition that could lead to data loss
// but this needs more retries in case of schema updates for better user experience
directSqlTable.updateIcebergTable(tableInfo);
}
/**
* get data metadata from partition metrics.
*
* @param metrics metrics.
* @return object node of the metrics
*/
public ObjectNode getDataMetadataFromIcebergMetrics(
final ScanSummary.PartitionMetrics metrics) {
final ObjectNode root = JsonNodeFactory.instance.objectNode();
root.set(DataMetadataMetricConstants.DATA_METADATA_METRIC_NAME, getMetricValueNode(metrics));
return root;
}
/**
* Checks if the given iceberg table metadata location exists.
*
* @param tableName The table name.
* @param metadataLocation The metadata location.
* @return True if the location exists.
*/
public boolean doesMetadataLocationExist(final QualifiedName tableName,
final String metadataLocation) {
boolean result = false;
if (!StringUtils.isBlank(metadataLocation)) {
try {
final Path metadataPath = new Path(metadataLocation);
result = getFs(metadataPath, conf).exists(metadataPath);
} catch (Exception ignored) {
log.warn(String.format("Failed getting the filesystem for metadata location: %s tableName: %s",
metadataLocation, tableName));
registry.counter(HiveMetrics.CounterFileSystemReadFailure.name()).increment();
}
}
return result;
}
private static FileSystem getFs(final Path path,
final Configuration conf) {
try {
return path.getFileSystem(conf);
} catch (IOException ex) {
throw new RuntimeException(String.format("Failed to get file system for path: %s", path), ex);
}
}
private ObjectNode getMetricValueNode(final ScanSummary.PartitionMetrics metrics) {
final ObjectNode node = JsonNodeFactory.instance.objectNode();
ObjectNode valueNode = JsonNodeFactory.instance.objectNode();
valueNode.put(DataMetadataMetricConstants.DATA_METADATA_VALUE, metrics.recordCount());
node.set(DataMetadataMetrics.rowCount.getMetricName(), valueNode);
valueNode = JsonNodeFactory.instance.objectNode();
valueNode.put(DataMetadataMetricConstants.DATA_METADATA_VALUE, metrics.fileCount());
node.set(DataMetadataMetrics.fileCount.getMetricName(), valueNode);
return node;
}
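// Illustrative shape of the resulting data metadata node, based on the constants above:
// {
//   "metrics": {
//     "com.netflix.dse.mds.metric.RowCount": { "value": <recordCount> },
//     "com.netflix.dse.mds.metric.NumFiles": { "value": <fileCount> }
//   }
// }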
/**
* record the duration to timer.
*
* @param requestTag tag name.
* @param duration duration of the operation.
*/
private void recordTimer(final String requestTag, final long duration) {
final HashMap<String, String> tags = new HashMap<>();
tags.put("request", requestTag);
this.registry.timer(registry.createId(IcebergRequestMetrics.TimerIcebergRequest.getMetricName())
.withTags(tags))
.record(duration, TimeUnit.MILLISECONDS);
log.debug("## Time taken to complete {} is {} ms", requestTag, duration);
}
/**
* increase the counter of operation.
*
* @param metricName metric name
* @param tableName table name of the operation
*/
private void increaseCounter(final String metricName, final QualifiedName tableName) {
this.registry.counter(registry.createId(metricName).withTags(tableName.parts())).increment();
}
private Date fromEpochMilliToDate(@Nullable final Long l) {
return (l == null) ? null : Date.from(Instant.ofEpochMilli(l));
}
//iceberg://<db-name.table-name>/<partition>/snapshot_time=<dateCreated>
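// e.g. (hypothetical values, assuming the configured scheme is "iceberg"):
//   iceberg://testdb.events/dateint=20180503/snapshot_time=1525392000
// where 1525392000 is the partition's data timestamp in epoch seconds, or the partition
// name's hashCode() when no data timestamp is available.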
private String getIcebergPartitionURI(final String databaseName,
final String tableName,
final String partitionName,
@Nullable final Long dataTimestampMillis,
final ConnectorContext context) {
return String.format("%s://%s.%s/%s/snapshot_time=%s",
context.getConfig().getIcebergPartitionUriScheme(),
databaseName,
tableName,
partitionName,
(dataTimestampMillis == null) ? partitionName.hashCode()
: Instant.ofEpochMilli(dataTimestampMillis).getEpochSecond());
}
}
| 9,539 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableWrapper.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.iceberg;
import org.apache.iceberg.Table;
import lombok.Data;
import java.util.Map;
/**
* This class represents an Iceberg table along with extra table properties.
*/
@Data
public class IcebergTableWrapper {
private final Table table;
private final Map<String, String> extraProperties;
}
| 9,540 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableCriteriaImpl.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
/**
* Default Iceberg table criteria implementation.
*
* @author zhenl
* @since 1.2.0
*/
public class IcebergTableCriteriaImpl implements IcebergTableCriteria {
private final ConnectorContext connectorContext;
/**
* Iceberg table criteriaImpl constructor.
* @param connectorContext connector context
*/
public IcebergTableCriteriaImpl(final ConnectorContext connectorContext) {
this.connectorContext = connectorContext;
}
}
| 9,541 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableOpWrapper.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.apache.iceberg.ScanSummary;
import org.apache.iceberg.Table;
import org.apache.iceberg.expressions.Expression;
import javax.annotation.Nullable;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
/**
* Iceberg table operation wrapper.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class IcebergTableOpWrapper {
private final Config config;
private final Map<String, String> configuration;
private final ThreadServiceManager threadServiceManager;
/**
* Constructor.
* @param connectorContext server context
* @param threadServiceManager executor service
*/
public IcebergTableOpWrapper(final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager) {
this.config = connectorContext.getConfig();
this.configuration = connectorContext.getConfiguration();
this.threadServiceManager = threadServiceManager;
}
/**
* get iceberg partition map.
*
* @param icebergTable iceberg table
* @param filter iceberg filter expression
* @return scan summary map
*/
public Map<String, ScanSummary.PartitionMetrics> getPartitionMetricsMap(final Table icebergTable,
@Nullable final Expression filter) {
Map<String, ScanSummary.PartitionMetrics> result = Maps.newHashMap();
//
// Cancel the iceberg call if it times out.
//
final Future<Map<String, ScanSummary.PartitionMetrics>> future = threadServiceManager.getExecutor()
.submit(() -> (filter != null) ? ScanSummary.of(icebergTable.newScan().filter(filter))
.limit(config.getMaxPartitionsThreshold())
.throwIfLimited()
.build()
:
ScanSummary.of(icebergTable.newScan()) //the top x records
.limit(config.getIcebergTableSummaryFetchSize())
.build());
try {
final int getIcebergPartitionsTimeout = Integer.parseInt(configuration
.getOrDefault(HiveConfigConstants.GET_ICEBERG_PARTITIONS_TIMEOUT, "120"));
result = future.get(getIcebergPartitionsTimeout, TimeUnit.SECONDS);
} catch (Exception e) {
if (!future.isDone()) {
try {
future.cancel(true);
} catch (Exception ignored) {
log.warn("Failed cancelling the task that gets the partitions for an iceberg table.");
}
}
if (e instanceof ExecutionException && e.getCause() != null) {
//
// On execution exception, throw the inner exception. This is added to throw these as 4xx errors
// instead of 5xx.
//
if (e.getCause() instanceof IllegalArgumentException) {
throw (IllegalArgumentException) e.getCause();
}
Throwables.propagate(e.getCause());
}
Throwables.propagate(e);
}
return result;
}
}
| 9,542 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/package-info.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Iceberg table interaction.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.iceberg;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,543 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergRequestMetrics.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.iceberg;
import lombok.Getter;
/**
* Iceberg Metrics.
*
* @author zhenl
* @since 1.2.0
*/
@Getter
public enum IcebergRequestMetrics {
/**
* Timer.
*/
TimerIcebergRequest(IcebergRequestMetrics.Type.timer, "requests"),
/**
* Tag of loadTable operation.
*/
TagLoadTable("loadTable"),
/**
* Tag of getPartitionMap operation.
*/
TagGetPartitionMap("getPartitionMap"),
/**
* Counter.
*/
CounterGetPartitionsExceedThresholdFailure(IcebergRequestMetrics.Type.counter,
"getPartitionsExceedThresholdFailure");
enum Type {
counter,
gauge,
timer
}
private final String metricName;
IcebergRequestMetrics(final IcebergRequestMetrics.Type type, final String measure) {
this.metricName = String.format("metacat.iceberg.%s.%s", type.name(), measure);
}
IcebergRequestMetrics(final String name) {
this.metricName = name;
}
@Override
public String toString() {
return metricName;
}
}
| 9,544 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/HiveConnectorConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;
import com.netflix.metacat.common.server.connectors.util.TimeUtil;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.client.thrift.HiveMetastoreClientFactory;
import com.netflix.metacat.connector.hive.client.thrift.MetacatHiveClient;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.net.URI;
import java.util.concurrent.TimeUnit;
/**
* Hive configs.
*
* @author zhenl
* @since 1.1.0
*/
@Slf4j
@Configuration
public class HiveConnectorConfig {
/**
* create hive connector database service.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverter metacat converter
* @return HiveConnectorDatabaseService
*/
@Bean
@ConditionalOnMissingBean(HiveConnectorDatabaseService.class)
public HiveConnectorDatabaseService hiveDatabaseService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverter
) {
return new HiveConnectorDatabaseService(
metacatHiveClient,
hiveMetacatConverter
);
}
/**
* create hive connector table service.
*
* @param metacatHiveClient metacat hive client
* @param hiveMetacatConverters hive metacat converters
* @param hiveConnectorDatabaseService hive database service
* @param connectorContext connector config
* @return HiveConnectorTableService
*/
@Bean
@ConditionalOnMissingBean(HiveConnectorTableService.class)
public HiveConnectorTableService hiveTableService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final ConnectorContext connectorContext
) {
return new HiveConnectorTableService(
connectorContext.getCatalogName(),
metacatHiveClient,
hiveConnectorDatabaseService,
hiveMetacatConverters,
connectorContext
);
}
/**
* create hive connector partition service.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverter metacat converter
* @param connectorContext connector config
* @return HiveConnectorPartitionService
*/
@Bean
@ConditionalOnMissingBean(HiveConnectorPartitionService.class)
public HiveConnectorPartitionService partitionService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverter,
final ConnectorContext connectorContext
) {
return new HiveConnectorPartitionService(
connectorContext,
metacatHiveClient,
hiveMetacatConverter
);
}
/**
* create thrift hive client.
*
* @param connectorContext connector config.
* @return hive client
* @throws MetaException meta exception
*/
@Bean
@ConditionalOnMissingBean(IMetacatHiveClient.class)
public IMetacatHiveClient createThriftClient(final ConnectorContext connectorContext) throws MetaException {
final HiveMetastoreClientFactory factory = new HiveMetastoreClientFactory(
null,
(int) TimeUtil.toTime(
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.HIVE_METASTORE_TIMEOUT, "20s"),
TimeUnit.SECONDS,
TimeUnit.MILLISECONDS
)
);
final String metastoreUri = connectorContext.getConfiguration().get(HiveConfigConstants.THRIFT_URI);
try {
return new MetacatHiveClient(new URI(metastoreUri), factory);
} catch (Exception e) {
final String message = String.format("Invalid thrift uri %s", metastoreUri);
log.info(message);
throw new IllegalArgumentException(message, e);
}
}
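// Illustrative note (hypothetical host): the thrift URI is read from the connector
// configuration under HiveConfigConstants.THRIFT_URI and is typically of the form
//   thrift://hive-metastore.example.com:9083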
/**
* thread Service Manager.
* @param connectorContext connector config
* @return threadServiceManager
*/
@Bean
public ThreadServiceManager threadServiceManager(final ConnectorContext connectorContext) {
return new ThreadServiceManager(connectorContext.getRegistry(),
connectorContext.getConfig().getServiceMaxNumberOfThreads(),
1000,
"hive");
}
}
| 9,545 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/CacheConfig.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Spring configuration for cache.
*
* @author amajumdar
* @since 1.3.0
*/
@Configuration
@ConditionalOnProperty(value = "metacat.cache.enabled", havingValue = "true")
@EnableCaching
public class CacheConfig {
/**
* Returns the cache manager from the parent application context.
* @param connectorContext connector context
* @return CacheManager
*/
@Bean
public CacheManager cacheManager(final ConnectorContext connectorContext) {
return connectorContext.getApplicationContext().getBean(CacheManager.class);
}
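// Illustrative note: this configuration only activates when the parent application sets
// metacat.cache.enabled=true (per @ConditionalOnProperty above) and exposes a CacheManager
// bean in its application context, e.g. in application.properties:
//   metacat.cache.enabled=true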
}
| 9,546 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/HiveConnectorFastServiceConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.commonview.CommonViewHandler;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteria;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteriaImpl;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpWrapper;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpsProxy;
import com.netflix.metacat.connector.hive.sql.DirectSqlDatabase;
import com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition;
import com.netflix.metacat.connector.hive.sql.DirectSqlSavePartition;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastDatabaseService;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastPartitionService;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastTableService;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastTableServiceProxy;
import com.netflix.metacat.connector.hive.sql.SequenceGeneration;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.transaction.annotation.EnableTransactionManagement;
/**
* HiveConnectorFastServiceConfig.
*
* @author zhenl
* @since 1.1.0
*/
@Configuration
@EnableTransactionManagement(proxyTargetClass = true)
@ConditionalOnProperty(value = "useHiveFastService", havingValue = "true")
public class HiveConnectorFastServiceConfig {
/**
* create hive connector fast service metric.
*
* @param connectorContext connector config
* @return HiveConnectorFastServiceMetric
*/
@Bean
public HiveConnectorFastServiceMetric hiveConnectorFastServiceMetric(
final ConnectorContext connectorContext
) {
return new HiveConnectorFastServiceMetric(
connectorContext.getRegistry()
);
}
/**
* create hive connector fast partition service.
*
* @param metacatHiveClient hive client
* @param warehouse hive warehouse
* @param hiveMetacatConverter metacat converter
* @param connectorContext connector config
* @param directSqlGetPartition service to get partitions
* @param directSqlSavePartition service to save partitions
* @param icebergTableHandler iceberg table handler
* @return HiveConnectorPartitionService
*/
@Bean
public HiveConnectorPartitionService partitionService(
final IMetacatHiveClient metacatHiveClient,
final Warehouse warehouse,
final HiveConnectorInfoConverter hiveMetacatConverter,
final ConnectorContext connectorContext,
final DirectSqlGetPartition directSqlGetPartition,
final DirectSqlSavePartition directSqlSavePartition,
final IcebergTableHandler icebergTableHandler
) {
return new HiveConnectorFastPartitionService(
connectorContext,
metacatHiveClient,
warehouse,
hiveMetacatConverter,
directSqlGetPartition,
directSqlSavePartition,
icebergTableHandler
);
}
/**
* Service to get partitions.
*
* @param threadServiceManager thread service manager
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param serviceMetric fast service metric
* @return DirectSqlGetPartition
*/
@Bean
public DirectSqlGetPartition directSqlGetPartition(
final ThreadServiceManager threadServiceManager,
final ConnectorContext connectorContext,
@Qualifier("hiveReadJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final HiveConnectorFastServiceMetric serviceMetric
) {
return new DirectSqlGetPartition(
connectorContext,
threadServiceManager,
hiveJdbcTemplate,
serviceMetric
);
}
/**
* Service to save partitions.
*
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param sequenceGeneration sequence generator
* @param serviceMetric fast service metric
* @return DirectSqlSavePartition
*/
@Bean
public DirectSqlSavePartition directSqlSavePartition(
final ConnectorContext connectorContext,
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final SequenceGeneration sequenceGeneration,
final HiveConnectorFastServiceMetric serviceMetric
) {
return new DirectSqlSavePartition(
connectorContext,
hiveJdbcTemplate,
sequenceGeneration,
serviceMetric
);
}
/**
* Service to generate sequence ids.
*
* @param hiveJdbcTemplate hive JDBC template
* @return SequenceGeneration
*/
@Bean
public SequenceGeneration sequenceGeneration(
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate
) {
return new SequenceGeneration(hiveJdbcTemplate);
}
/**
* Data access service for table.
*
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param serviceMetric fast service metric
* @param directSqlSavePartition partition service involving direct sqls
* @param warehouse warehouse
* @return DirectSqlTable
*/
@Bean
public DirectSqlTable directSqlTable(
final ConnectorContext connectorContext,
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final HiveConnectorFastServiceMetric serviceMetric,
final DirectSqlSavePartition directSqlSavePartition,
final Warehouse warehouse
) {
return new DirectSqlTable(
connectorContext,
hiveJdbcTemplate,
serviceMetric,
directSqlSavePartition,
warehouse
);
}
/**
* Data access service for database.
*
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param serviceMetric fast service metric
* @return DirectSqlDatabase
*/
@Bean
public DirectSqlDatabase directSqlDatabase(
final ConnectorContext connectorContext,
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final HiveConnectorFastServiceMetric serviceMetric
) {
return new DirectSqlDatabase(
connectorContext,
hiveJdbcTemplate,
serviceMetric
);
}
/**
* create hive connector fast table service.
*
* @param metacatHiveClient metacat hive client
* @param hiveMetacatConverters hive metacat converters
* @param hiveConnectorDatabaseService hive database service
* @param connectorContext server context
* @param directSqlTable table jpa service
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
* @param hiveConnectorFastTableServiceProxy hive connector fast table service proxy
* @return HiveConnectorFastTableService
*/
@Bean
public HiveConnectorTableService hiveTableService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final ConnectorContext connectorContext,
final DirectSqlTable directSqlTable,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler,
final HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy
) {
return new HiveConnectorFastTableService(
connectorContext.getCatalogName(),
metacatHiveClient,
hiveConnectorDatabaseService,
hiveMetacatConverters,
connectorContext,
directSqlTable,
icebergTableHandler,
commonViewHandler,
hiveConnectorFastTableServiceProxy
);
}
/**
* create hive connector fast table service proxy.
*
* @param hiveMetacatConverters hive metacat converters
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
* @return HiveConnectorFastTableServiceProxy
*/
@Bean
public HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy(
final HiveConnectorInfoConverter hiveMetacatConverters,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler
) {
return new HiveConnectorFastTableServiceProxy(
hiveMetacatConverters,
icebergTableHandler,
commonViewHandler
);
}
/**
* create hive connector fast database service.
*
* @param metacatHiveClient metacat hive client
* @param hiveMetacatConverters hive metacat converters
* @param directSqlDatabase database sql service
* @return HiveConnectorDatabaseService
*/
@Bean
public HiveConnectorDatabaseService hiveDatabaseService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final DirectSqlDatabase directSqlDatabase
) {
return new HiveConnectorFastDatabaseService(
metacatHiveClient,
hiveMetacatConverters,
directSqlDatabase
);
}
/**
* Create iceberg table handler.
* @param connectorContext server context
* @param icebergTableCriteria iceberg table criteria
* @param icebergTableOpWrapper iceberg table operation
* @param icebergTableOpsProxy IcebergTableOps proxy
* @return IcebergTableHandler
*/
@Bean
public IcebergTableHandler icebergTableHandler(final ConnectorContext connectorContext,
final IcebergTableCriteria icebergTableCriteria,
final IcebergTableOpWrapper icebergTableOpWrapper,
final IcebergTableOpsProxy icebergTableOpsProxy) {
return new IcebergTableHandler(connectorContext,
icebergTableCriteria,
icebergTableOpWrapper,
icebergTableOpsProxy);
}
/**
* Create iceberg table criteria.
* @param connectorContext server context
* @return IcebergTableCriteria
*/
@Bean
public IcebergTableCriteria icebergTableCriteria(final ConnectorContext connectorContext) {
return new IcebergTableCriteriaImpl(connectorContext);
}
/**
* Create iceberg table operation wrapper.
* @param connectorContext server context
* @param threadServiceManager executor service
* @return IcebergTableOpWrapper
*/
@Bean
public IcebergTableOpWrapper icebergTableOpWrapper(final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager) {
return new IcebergTableOpWrapper(connectorContext, threadServiceManager);
}
/**
* Create commonViewHandler.
*
* @param connectorContext server context
* @return CommonViewHandler
*/
@Bean
public CommonViewHandler commonViewHandler(final ConnectorContext connectorContext) {
return new CommonViewHandler(connectorContext);
}
/**
* Create IcebergTableOps proxy.
* @return IcebergTableOpsProxy
*/
@Bean
public IcebergTableOpsProxy icebergTableOps() {
return new IcebergTableOpsProxy();
}
}
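A self-contained sketch of the @ConditionalOnProperty gating used by this configuration, assuming spring-boot-autoconfigure is on the classpath; the class and bean names are invented. The nested configuration registers its bean only when useHiveFastService resolves to "true" in the Environment.
import java.util.Collections;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.MapPropertySource;
public final class ConditionalConfigDemo {
    @Configuration
    @ConditionalOnProperty(value = "useHiveFastService", havingValue = "true")
    static class FastConfig {
        @Bean
        String fastMarker() {
            return "fast service enabled";
        }
    }
    public static void main(final String[] args) {
        final AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        // Make the flag visible to the condition before the configuration class is processed.
        ctx.getEnvironment().getPropertySources().addFirst(new MapPropertySource(
            "demo", Collections.<String, Object>singletonMap("useHiveFastService", "true")));
        ctx.register(FastConfig.class);
        ctx.refresh();
        System.out.println(ctx.getBean("fastMarker")); // present only because the flag is "true"
        ctx.close();
    }
}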
| 9,547 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive connector config.
*
* @author zhenl
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.configs;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,548 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/HiveConnectorClientConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.client.embedded.EmbeddedHiveClient;
import com.netflix.metacat.connector.hive.metastore.HMSHandlerProxy;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import javax.sql.DataSource;
/**
* Hive Connector Client Config.
*
* @author zhenl
* @since 1.1.0
*/
@Configuration
@ConditionalOnProperty(value = "useEmbeddedClient", havingValue = "true")
public class HiveConnectorClientConfig {
/** Default Query timeout in milliseconds. */
private static final int DEFAULT_DATASTORE_TIMEOUT = 60000;
/** Default Query timeout in milliseconds for reads. */
private static final int DEFAULT_DATASTORE_READ_TIMEOUT = 120000;
/** Default Query timeout in milliseconds for writes. */
private static final int DEFAULT_DATASTORE_WRITE_TIMEOUT = 120000;
/**
* create local hive client.
*
* @param connectorContext connector config context
* @return IMetacatHiveClient
* @throws Exception exception
*/
@Bean
public IMetacatHiveClient createLocalClient(final ConnectorContext connectorContext) throws Exception {
try {
final HiveConf conf = this.getDefaultConf(connectorContext);
connectorContext.getConfiguration().forEach(conf::set);
DataSourceManager.get().load(
connectorContext.getCatalogShardName(),
connectorContext.getConfiguration()
);
return new EmbeddedHiveClient(
connectorContext.getCatalogName(),
HMSHandlerProxy.getProxy(conf, connectorContext.getRegistry()),
connectorContext.getRegistry()
);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format(
"Failed creating the hive metastore client for catalog: %s",
connectorContext.getCatalogName()
),
e
);
}
}
/**
* create warehouse for file system calls.
*
* @param connectorContext connector config context
* @return Warehouse
*/
@Bean
public Warehouse warehouse(final ConnectorContext connectorContext) {
try {
final HiveConf conf = this.getDefaultConf(connectorContext);
connectorContext.getConfiguration().forEach(conf::set);
return new Warehouse(conf);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format(
"Failed creating the hive warehouse for catalog: %s",
connectorContext.getCatalogName()
),
e
);
}
}
/**
* hive DataSource.
*
* @param connectorContext connector config.
* @return data source
*/
@Bean
public DataSource hiveDataSource(final ConnectorContext connectorContext) {
final HiveConf conf = this.getDefaultConf(connectorContext);
connectorContext.getConfiguration().forEach(conf::set);
DataSourceManager.get().load(
connectorContext.getCatalogShardName(),
connectorContext.getConfiguration()
);
return DataSourceManager.get().get(connectorContext.getCatalogShardName());
}
/**
* hive metadata Transaction Manager.
*
* @param hiveDataSource hive data source
* @return hive transaction manager
*/
@Bean
public DataSourceTransactionManager hiveTxManager(
@Qualifier("hiveDataSource") final DataSource hiveDataSource) {
return new DataSourceTransactionManager(hiveDataSource);
}
/**
* hive metadata read JDBC template. Query timeout is set to control long running read queries.
*
* @param connectorContext connector config.
* @param hiveDataSource hive data source
* @return hive JDBC Template
*/
@Bean
public JdbcTemplate hiveReadJdbcTemplate(
final ConnectorContext connectorContext,
@Qualifier("hiveDataSource") final DataSource hiveDataSource) {
final JdbcTemplate result = new JdbcTemplate(hiveDataSource);
result.setQueryTimeout(getDataStoreReadTimeout(connectorContext) / 1000);
return result;
}
/**
* hive metadata write JDBC template. Query timeout is set to control long running write queries.
*
* @param connectorContext connector config.
* @param hiveDataSource hive data source
* @return hive JDBC Template
*/
@Bean
public JdbcTemplate hiveWriteJdbcTemplate(
final ConnectorContext connectorContext,
@Qualifier("hiveDataSource") final DataSource hiveDataSource) {
final JdbcTemplate result = new JdbcTemplate(hiveDataSource);
result.setQueryTimeout(getDataStoreWriteTimeout(connectorContext) / 1000);
return result;
}
@VisibleForTesting
private HiveConf getDefaultConf(
final ConnectorContext connectorContext
) {
final HiveConf result = new HiveConf();
result.setBoolean(HiveConfigConstants.USE_METASTORE_LOCAL, true);
final int dataStoreTimeout = getDataStoreTimeout(connectorContext);
result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT, dataStoreTimeout);
result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT, dataStoreTimeout);
result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT, getDataStoreWriteTimeout(connectorContext));
result.setInt(HiveConfigConstants.HIVE_METASTORE_DS_RETRY, 0);
result.setInt(HiveConfigConstants.HIVE_HMSHANDLER_RETRY, 0);
result.set(
HiveConfigConstants.JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS,
HiveConfigConstants.JAVAX_JDO_PERSISTENCEMANAGER_FACTORY
);
result.setBoolean(HiveConfigConstants.HIVE_STATS_AUTOGATHER, false);
return result;
}
private int getDataStoreTimeout(final ConnectorContext connectorContext) {
int result = DEFAULT_DATASTORE_TIMEOUT;
try {
result = Integer.parseInt(
connectorContext.getConfiguration().get(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT));
} catch (final Exception ignored) { }
return result;
}
private int getDataStoreReadTimeout(final ConnectorContext connectorContext) {
int result = DEFAULT_DATASTORE_READ_TIMEOUT;
try {
result = Integer.parseInt(
connectorContext.getConfiguration().get(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT));
} catch (final Exception ignored) { }
return result;
}
private int getDataStoreWriteTimeout(final ConnectorContext connectorContext) {
int result = DEFAULT_DATASTORE_WRITE_TIMEOUT;
try {
result = Integer.parseInt(
connectorContext.getConfiguration().get(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT));
} catch (final Exception ignored) { }
return result;
}
}
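A standalone sketch (the constant, property key and class name are invented stand-ins for the HiveConfigConstants values) of the two small conventions in this configuration: read an optional millisecond timeout with a fallback default, then divide by 1000 because JdbcTemplate.setQueryTimeout takes seconds.
import java.util.Collections;
import java.util.Map;
public final class TimeoutSettingsSketch {
    private static final int DEFAULT_READ_TIMEOUT_MS = 120000;
    private TimeoutSettingsSketch() {
    }
    /** Returns the configured read timeout in milliseconds, or the default when missing or unparseable. */
    static int readTimeoutMs(final Map<String, String> configuration) {
        try {
            return Integer.parseInt(configuration.get("datastore.read.timeout.ms"));
        } catch (final Exception ignored) {
            return DEFAULT_READ_TIMEOUT_MS;
        }
    }
    public static void main(final String[] args) {
        final int millis = readTimeoutMs(Collections.singletonMap("datastore.read.timeout.ms", "90000"));
        System.out.println("query timeout: " + millis / 1000 + "s"); // prints 90s
    }
}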
| 9,549 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/monitoring/HiveMetrics.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//CHECKSTYLE:OFF
package com.netflix.metacat.connector.hive.monitoring;
import lombok.Getter;
/**
* Hive Metrics.
*
* @author zhenl
* @since 1.0.0
*/
@Getter
public enum HiveMetrics {
/**
* hive sql lock error.
*/
CounterHiveSqlLockError(Type.counter, "hiveSqlLockError"),
CounterHiveGetTablePartitionsTimeoutFailure(Type.counter, "getPartitionsTimeoutFailure"),
CounterHiveExperimentGetTablePartitionsFailure(Type.counter, "experimentGetPartitionsFailure"),
CounterHivePartitionPathIsNotDir(Type.counter, "partitionPathIsNotDir"),
CounterHivePartitionFileSystemCall(Type.counter, "partitionFileSystemCall"),
CounterHiveGetPartitionsExceedThresholdFailure(Type.counter, "getPartitionsExceedThresholdFailure"),
CounterHiveFileSystemFailure(Type.counter, "fileSystemFailure"),
CounterFileSystemReadFailure(Type.counter, "fileSystemReadFailure"),
/**
* Gauge.
*/
GaugeAddPartitions(Type.gauge, "partitionAdd"),
GaugeDeletePartitions(Type.gauge, "partitionDelete"),
GaugeGetPartitionsCount(Type.gauge, "partitionGet"),
GaugePreExpressionFilterGetPartitionsCount(Type.gauge, "preExpressionFilterGetPartitionsCount"),
/**
* Timer.
*/
TimerHiveRequest(Type.timer, "embeddedclient.requests"),
TimerFastHiveRequest(Type.timer, "fast.requests"),
/**
* hive function names.
*/
TagCreateDatabase("createDatabase"),
TagCreateTable("createTable"),
TagDropDatabase("dropDatabase"),
TagDropHivePartitions("dropHivePartitions"),
TagAlterDatabase("alterDatabase"),
TagGetAllDatabases("getAllDatabases"),
TagGetDatabase("getDatabase"),
TagGetAllTables("getAllTables"),
TagGetTableNamesByFilter("getTableNamesByFilter"),
TagGetTableByName("getTableByName"),
TagLoadTable("loadTable"),
TagAlterTable("alterTable"),
TagAddPartitions("addPartitions"),
TagAlterPartitions("alterPartitions"),
TagCreatePartitionLocations("createPartitionLocations"),
TagAddDropPartitions("addDropPartitions"),
TagDropTable("dropTable"),
TagRename("rename"),
TagListPartitionsByFilter("listPartitionsByFilter"),
TagGetPartitions("getPartitions"),
TagGetPartitionCount("getPartitionCount"),
TagGetPartitionKeys("getPartitionKeys"),
TagGetPartitionNames("getPartitionNames"),
TagGetTableNames("getTableNames"),
TagTableExists("tableExists");
enum Type {
counter,
gauge,
timer
}
private final String metricName;
HiveMetrics(final Type type, final String measure) {
this.metricName = String.format("metacat.hive.%s.%s", type.name(), measure);
}
HiveMetrics(final String name) {
this.metricName = name;
}
@Override
public String toString() {
return metricName;
}
}
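A quick illustration of how the enum composes names: typed entries expand to metacat.hive.<type>.<measure>, while tag entries keep the bare function name. The demo class below is not part of the repository.
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
public final class HiveMetricsDemo {
    private HiveMetricsDemo() {
    }
    public static void main(final String[] args) {
        // metacat.hive.timer.embeddedclient.requests
        System.out.println(HiveMetrics.TimerHiveRequest.getMetricName());
        // metacat.hive.counter.hiveSqlLockError
        System.out.println(HiveMetrics.CounterHiveSqlLockError.getMetricName());
        // getPartitions (tag entries are plain function names)
        System.out.println(HiveMetrics.TagGetPartitions.getMetricName());
    }
}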
| 9,550 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/monitoring/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Hive monitor.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.monitoring;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,551 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive client implementation.
* @author zhenl
* @since 1.1.0
*/
package com.netflix.metacat.connector.hive.client;
| 9,552 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/HiveMetastoreClientFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.thrift;
import com.google.common.net.HostAndPort;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import javax.annotation.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.Socket;
import java.net.SocketAddress;
/**
* HiveMetastoreClientFactory.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveMetastoreClientFactory {
private final HostAndPort socksProxy;
private int timeoutMillis = 3000;
/**
* Constructor.
*
* @param socksProxy address
* @param timeoutMillis timeout in milliseconds
*/
public HiveMetastoreClientFactory(@Nullable final HostAndPort socksProxy,
final int timeoutMillis) {
this.socksProxy = socksProxy;
this.timeoutMillis = timeoutMillis;
}
private static Socket createSocksSocket(final HostAndPort proxy) {
final SocketAddress address = InetSocketAddress.createUnresolved(proxy.getHostText(), proxy.getPort());
return new Socket(new Proxy(Proxy.Type.SOCKS, address));
}
private static TTransportException rewriteException(final TTransportException e, final String host) {
return new TTransportException(e.getType(), String.format("%s: %s", host, e.getMessage()), e.getCause());
}
/**
* Create a metastore client connected to the given host and port.
*
* @param host hostname
* @param port port number
* @return HiveMetastoreClient
* @throws TTransportException TTransportException
*/
public HiveMetastoreClient create(final String host, final int port)
throws TTransportException {
return new HiveMetastoreClient(createTransport(host, port));
}
protected TTransport createRawTransport(final String host, final int port)
throws TTransportException {
if (socksProxy == null) {
final TTransport transport = new TSocket(host, port, timeoutMillis);
try {
transport.open();
return transport;
} catch (Throwable t) {
transport.close();
throw t;
}
}
final Socket socks = createSocksSocket(socksProxy);
try {
try {
socks.connect(InetSocketAddress.createUnresolved(host, port), timeoutMillis);
socks.setSoTimeout(timeoutMillis);
return new TSocket(socks);
} catch (Throwable t) {
closeQuietly(socks);
throw t;
}
} catch (IOException e) {
throw new TTransportException(e);
}
}
protected TTransport createTransport(final String host, final int port)
throws TTransportException {
try {
return new TTransportWrapper(createRawTransport(host, port), host);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
private static void closeQuietly(final Closeable closeable) {
try {
closeable.close();
} catch (IOException e) {
// ignored
}
}
private static class TTransportWrapper
extends TTransport {
private final TTransport transport;
private final String host;
TTransportWrapper(final TTransport transport, final String host) {
this.transport = transport;
this.host = host;
}
@Override
public boolean isOpen() {
return transport.isOpen();
}
@Override
public boolean peek() {
return transport.peek();
}
@Override
public byte[] getBuffer() {
return transport.getBuffer();
}
@Override
public int getBufferPosition() {
return transport.getBufferPosition();
}
@Override
public int getBytesRemainingInBuffer() {
return transport.getBytesRemainingInBuffer();
}
@Override
public void consumeBuffer(final int len) {
transport.consumeBuffer(len);
}
@Override
public void close() {
transport.close();
}
@Override
public void open()
throws TTransportException {
try {
transport.open();
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public int readAll(final byte[] bytes, final int off, final int len)
throws TTransportException {
try {
return transport.readAll(bytes, off, len);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public int read(final byte[] bytes, final int off, final int len)
throws TTransportException {
try {
return transport.read(bytes, off, len);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public void write(final byte[] bytes)
throws TTransportException {
try {
transport.write(bytes);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public void write(final byte[] bytes, final int off, final int len)
throws TTransportException {
try {
transport.write(bytes, off, len);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public void flush()
throws TTransportException {
try {
transport.flush();
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
}
}
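Illustrative usage of the factory (host, port and proxy address are made up): create() opens the thrift transport eagerly, so connection failures surface here as TTransportException, already rewritten to include the host.
import com.google.common.net.HostAndPort;
import com.netflix.metacat.connector.hive.client.thrift.HiveMetastoreClient;
import com.netflix.metacat.connector.hive.client.thrift.HiveMetastoreClientFactory;
import org.apache.thrift.transport.TTransportException;
public final class FactoryUsageSketch {
    private FactoryUsageSketch() {
    }
    public static void main(final String[] args) {
        // Direct connection with a 20s socket timeout.
        final HiveMetastoreClientFactory direct = new HiveMetastoreClientFactory(null, 20000);
        // Same, but tunneled through a SOCKS proxy (shown only to illustrate the constructor).
        final HiveMetastoreClientFactory proxied =
            new HiveMetastoreClientFactory(HostAndPort.fromParts("socks.example.com", 1080), 20000);
        try (HiveMetastoreClient client = direct.create("metastore.example.com", 9083)) {
            System.out.println(client.get_all_databases());
        } catch (TTransportException e) {
            System.err.println("Could not reach the metastore: " + e.getMessage());
        } catch (Exception e) {
            System.err.println("Metastore call failed: " + e.getMessage());
        }
    }
}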
| 9,553 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/MetacatHiveClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.thrift;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.apache.thrift.transport.TTransportException;
import javax.annotation.Nullable;
import java.net.URI;
import java.util.List;
import java.util.Set;
/**
* MetacatHiveClient.
*
* @author zhenl
* @since 1.0.0
*/
public class MetacatHiveClient implements IMetacatHiveClient {
private static final short ALL_RESULTS = -1;
private HiveMetastoreClientFactory hiveMetastoreClientFactory;
private final String host;
private final int port;
/**
* Constructor.
*
* @param address address
* @param hiveMetastoreClientFactory hiveMetastoreClientFactory
* @throws MetaException exception
*/
public MetacatHiveClient(final URI address,
final HiveMetastoreClientFactory hiveMetastoreClientFactory)
throws MetaException {
this.hiveMetastoreClientFactory = hiveMetastoreClientFactory;
Preconditions.checkArgument(address.getHost() != null, "metastoreUri host is missing: " + address);
Preconditions.checkArgument(address.getPort() != -1, "metastoreUri port is missing: " + address);
this.host = address.getHost();
this.port = address.getPort();
}
/**
* Create a metastore client instance.
*
* @return hive metastore client
*/
private HiveMetastoreClient createMetastoreClient() {
try {
return hiveMetastoreClientFactory.create(host, port);
} catch (TTransportException e) {
throw new RuntimeException("Failed connecting to Hive metastore: " + host + ":" + port, e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllDatabases() throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_all_databases();
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllTables(final String databaseName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_all_tables(databaseName);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getTableNames(final String databaseName, final String filter, final int limit)
throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_table_names_by_filter(databaseName, filter, (short) limit);
}
}
/**
* {@inheritDoc}.
*/
@Override
public Table getTableByName(final String databaseName,
final String tableName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_table(databaseName, tableName);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void createTable(final Table table) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.create_table(table);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void dropTable(final String databaseName,
final String tableName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.drop_table(databaseName, tableName, false);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void rename(final String databaseName,
final String oldName,
final String newDatabaseName,
final String newName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
final Table table = client.get_table(databaseName, oldName);
client.drop_table(databaseName, oldName, false);
table.setDbName(newDatabaseName);
table.setTableName(newName);
client.create_table(table);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void alterTable(final String databaseName,
final String tableName,
final Table table) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.alter_table(databaseName, tableName, table);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void alterDatabase(final String databaseName,
final Database database) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.alter_database(databaseName, database);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void createDatabase(final Database database) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.create_database(database);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void dropDatabase(final String dbName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.drop_database(dbName, false, false);
}
}
/**
* {@inheritDoc}.
*/
@Override
public Database getDatabase(final String databaseName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_database(databaseName);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<Partition> getPartitions(final String databaseName,
final String tableName,
@Nullable final List<String> partitionNames) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
if (partitionNames != null && !partitionNames.isEmpty()) {
return client.get_partitions_by_names(databaseName, tableName, partitionNames);
} else {
return client.get_partitions(databaseName, tableName, ALL_RESULTS);
}
}
}
/**
* {@inheritDoc}.
*/
@Override
public void dropPartitions(final String databaseName,
final String tableName,
final List<String> partitionNames) throws
TException {
dropHivePartitions(createMetastoreClient(), databaseName, tableName, partitionNames);
}
/**
* {@inheritDoc}.
*/
@Override
public List<Partition> listPartitionsByFilter(final String databaseName,
final String tableName,
final String filter
) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_partitions_by_filter(databaseName, tableName, filter, ALL_RESULTS);
}
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(final String databaseName,
final String tableName) throws TException {
return getPartitions(databaseName, tableName, null).size();
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionNames(final String databaseName,
final String tableName)
throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_partition_names(databaseName, tableName, ALL_RESULTS);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void savePartitions(final List<Partition> partitions)
throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.add_partitions(partitions);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void alterPartitions(final String dbName, final String tableName,
final List<Partition> partitions) throws
TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.alter_partitions(dbName, tableName, partitions);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void addDropPartitions(final String dbName, final String tableName,
final List<Partition> partitions,
final List<String> delPartitionNames) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
try {
dropHivePartitions(client, dbName, tableName, delPartitionNames);
client.add_partitions(partitions);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("One or more partitions are invalid.", e);
} catch (TException e) {
throw new TException(
String.format("Internal server error adding/dropping partitions for table %s.%s",
dbName, tableName), e);
}
}
}
private void dropHivePartitions(final HiveMetastoreClient client, final String dbName, final String tableName,
final List<String> partitionNames)
throws TException {
if (partitionNames != null && !partitionNames.isEmpty()) {
final DropPartitionsRequest request = new DropPartitionsRequest(dbName, tableName, new RequestPartsSpec(
RequestPartsSpec._Fields.NAMES, partitionNames));
request.setDeleteData(false);
client.drop_partitions_req(request);
}
}
/**
* getRoles.
* @param user user
* @return set of roles
*/
public Set<String> getRoles(final String user) {
return Sets.newHashSet();
}
}
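A hedged usage sketch (the metastore URI is illustrative): each call on MetacatHiveClient opens a fresh thrift connection inside a try-with-resources block, so the client object itself is cheap to hold onto.
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.client.thrift.HiveMetastoreClientFactory;
import com.netflix.metacat.connector.hive.client.thrift.MetacatHiveClient;
import org.apache.hadoop.hive.metastore.api.Table;
import java.net.URI;
public final class MetacatHiveClientSketch {
    private MetacatHiveClientSketch() {
    }
    public static void main(final String[] args) throws Exception {
        final IMetacatHiveClient client = new MetacatHiveClient(
            new URI("thrift://metastore.example.com:9083"),
            new HiveMetastoreClientFactory(null, 20000));
        for (String db : client.getAllDatabases()) {
            System.out.println("database: " + db);
        }
        final Table table = client.getTableByName("default", "my_table");
        System.out.println(table.getSd().getLocation());
    }
}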
| 9,554 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/HiveMetastoreClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.thrift;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TTransport;
import java.io.Closeable;
/**
* HiveMetastoreClient.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveMetastoreClient
extends ThriftHiveMetastore.Client
implements Closeable {
private final TTransport transport;
/**
* Constructor.
*
* @param transport transport
*/
public HiveMetastoreClient(final TTransport transport) {
super(new TBinaryProtocol(transport));
this.transport = transport;
}
@Override
public void close() {
transport.close();
}
}
| 9,555 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive thrift client implementation.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.client.thrift;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,556 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/EmbeddedHiveClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.embedded;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.metastore.IMetacatHMSHandler;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.datanucleus.exceptions.NucleusDataStoreException;
import javax.annotation.Nullable;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
* Embedded hive metastore client implementation.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class EmbeddedHiveClient implements IMetacatHiveClient {
/**
* EXCEPTION_JDO_PREFIX.
*/
public static final String EXCEPTION_JDO_PREFIX = "javax.jdo.";
/**
* EXCEPTION_SQL_PREFIX.
*/
public static final String EXCEPTION_SQL_PREFIX = "java.sql.SQLException";
/**
* EX_MESSAGE_RESTART_TRANSACTION.
*/
public static final String EX_MESSAGE_RESTART_TRANSACTION = "restarting transaction";
/**
* DEFAULT_PRIVILEGES.
*/
private static final Set<HivePrivilege> DEFAULT_PRIVILEGES =
Sets.newHashSet(HivePrivilege.DELETE, HivePrivilege.INSERT, HivePrivilege.SELECT, HivePrivilege.UPDATE);
/**
* All results.
*/
private static final short ALL_RESULTS = -1;
private final IMetacatHMSHandler handler;
private final Registry registry;
private final Id requestTimerId;
private final Counter hiveSqlErrorCounter;
/**
* Embedded hive client implementation.
*
* @param catalogName catalogName
* @param handler handler
* @param registry registry
*/
public EmbeddedHiveClient(final String catalogName,
@Nullable final IMetacatHMSHandler handler,
final Registry registry) {
this.handler = handler;
this.registry = registry;
this.requestTimerId = registry.createId(HiveMetrics.TimerHiveRequest.getMetricName());
this.hiveSqlErrorCounter =
registry.counter(HiveMetrics.CounterHiveSqlLockError.getMetricName() + "." + catalogName);
}
@Override
public void shutdown() throws TException {
handler.shutdown();
}
private void handleSqlException(final TException ex) {
if ((ex.getCause() instanceof SQLException || ex.getMessage().startsWith(EXCEPTION_JDO_PREFIX)
|| ex.getMessage().contains(EXCEPTION_SQL_PREFIX))
&& ex.getMessage().contains(EX_MESSAGE_RESTART_TRANSACTION)) {
this.hiveSqlErrorCounter.increment();
}
}
/**
* {@inheritDoc}.
*/
@Override
public void createDatabase(final Database database) throws TException {
callWrap(HiveMetrics.TagCreateDatabase.getMetricName(), () -> {
handler.create_database(database);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void createTable(final Table table) throws TException {
callWrap(HiveMetrics.TagCreateTable.getMetricName(), () -> {
handler.create_table(table);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void dropDatabase(final String dbName) throws TException {
callWrap(HiveMetrics.TagDropDatabase.getMetricName(), () -> {
handler.drop_database(dbName, false, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void dropPartitions(final String databaseName,
final String tableName,
final List<String> partitionNames) throws
TException {
dropHivePartitions(databaseName, tableName, partitionNames);
}
private void dropHivePartitions(final String dbName, final String tableName,
final List<String> partitionNames)
throws TException {
callWrap(HiveMetrics.TagDropHivePartitions.getMetricName(), () -> {
final List<List<String>> dropParts = new ArrayList<>();
for (String partName : partitionNames) {
dropParts.add(new ArrayList<>(PartitionUtil.getPartitionKeyValues(partName).values()));
}
handler.add_drop_partitions(dbName, tableName, Lists.newArrayList(), dropParts, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void alterDatabase(final String databaseName,
final Database database) throws TException {
callWrap(HiveMetrics.TagAlterDatabase.getMetricName(), () -> {
handler.alter_database(databaseName, database);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllDatabases() throws TException {
return callWrap(HiveMetrics.TagGetAllDatabases.getMetricName(), handler::get_all_databases);
}
/**
* {@inheritDoc}.
*/
@Override
public Set<HivePrivilege> getDatabasePrivileges(final String user, final String databaseName) {
return DEFAULT_PRIVILEGES;
}
/**
* {@inheritDoc}.
*/
@Override
public Database getDatabase(final String databaseName) throws TException {
return callWrap(HiveMetrics.TagGetDatabase.getMetricName(), () -> handler.get_database(databaseName));
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllTables(final String databaseName) throws TException {
return callWrap(HiveMetrics.TagGetAllTables.getMetricName(), () -> {
final List<String> tables = handler.get_all_tables(databaseName);
if (tables.isEmpty()) {
handler.get_database(databaseName);
}
return tables;
});
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getTableNames(final String databaseName, final String filter, final int limit)
throws TException {
return callWrap(HiveMetrics.TagGetTableNamesByFilter.getMetricName(),
() -> handler.get_table_names_by_filter(databaseName, filter, (short) limit));
}
/**
* {@inheritDoc}.
*/
@Override
public Table getTableByName(final String databaseName,
final String tableName) throws TException {
return callWrap(HiveMetrics.TagGetTableByName.getMetricName(), () -> loadTable(databaseName, tableName));
}
private Table loadTable(final String dbName, final String tableName) throws TException {
return callWrap(HiveMetrics.TagLoadTable.getMetricName(), () -> handler.get_table(dbName, tableName));
}
/**
* {@inheritDoc}.
*/
@Override
public void alterTable(final String databaseName,
final String tableName,
final Table table) throws TException {
callWrap(HiveMetrics.TagAlterTable.getMetricName(), () -> {
handler.alter_table(databaseName, tableName, table);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void alterPartitions(final String dbName,
final String tableName,
final List<Partition> partitions) throws TException {
callWrap(HiveMetrics.TagAlterPartitions.getMetricName(), () -> {
handler.alter_partitions(dbName, tableName, partitions);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void addDropPartitions(final String dbName,
final String tableName,
final List<Partition> addParts,
final List<String> dropPartNames) throws TException {
callWrap(HiveMetrics.TagAddDropPartitions.getMetricName(), () -> {
final List<List<String>> dropParts = new ArrayList<>();
for (String partName : dropPartNames) {
dropParts.add(new ArrayList<>(PartitionUtil.getPartitionKeyValues(partName).values()));
}
handler.add_drop_partitions(dbName, tableName, addParts, dropParts, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void dropTable(final String databaseName,
final String tableName) throws TException {
callWrap(HiveMetrics.TagDropTable.getMetricName(), () -> {
handler.drop_table(databaseName, tableName, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public void rename(final String databaseName,
final String oldTableName,
final String newDatabaseName,
final String newTableName) throws TException {
callWrap(HiveMetrics.TagRename.getMetricName(), () -> {
final Table table = new Table(loadTable(databaseName, oldTableName));
table.setDbName(newDatabaseName);
table.setTableName(newTableName);
handler.alter_table(databaseName, oldTableName, table);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public List<Partition> getPartitions(final String databaseName,
final String tableName,
@Nullable final List<String> partitionNames) throws TException {
return callWrap(HiveMetrics.TagGetPartitions.getMetricName(), () -> {
if (partitionNames != null && !partitionNames.isEmpty()) {
return handler.get_partitions_by_names(databaseName, tableName, partitionNames);
}
return handler.get_partitions(databaseName, tableName, ALL_RESULTS);
});
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(final String databaseName,
final String tableName) throws TException {
return callWrap(HiveMetrics.TagGetPartitionCount.getMetricName(),
() -> getPartitions(databaseName, tableName, null).size());
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public List<String> getPartitionNames(final String databaseName,
final String tableName)
throws TException {
return callWrap(HiveMetrics.TagGetPartitionNames.getMetricName(),
() -> handler.get_partition_names(databaseName, tableName, ALL_RESULTS));
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public List<Partition> listPartitionsByFilter(final String databaseName,
final String tableName,
final String filter
) throws TException {
return callWrap(HiveMetrics.TagListPartitionsByFilter.getMetricName(),
() -> handler.get_partitions_by_filter(databaseName, tableName, filter, ALL_RESULTS));
}
private <R> R callWrap(final String requestName, final Callable<R> supplier) throws TException {
final long start = registry.clock().wallTime();
final Map<String, String> tags = new HashMap<String, String>();
tags.put("request", requestName);
try {
return supplier.call();
} catch (MetaException e) {
handleSqlException(e);
if (e.getCause() instanceof NucleusDataStoreException) {
throw new ConnectorException(e.getMessage(), e.getCause());
}
throw e;
} catch (TException e) {
handleSqlException(e);
throw e;
} catch (Exception e) {
throw new TException(e.getMessage(), e.getCause());
} finally {
final long duration = registry.clock().wallTime() - start;
log.debug("### Time taken to complete {} is {} ms", requestName,
duration);
this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
}
}
}
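A reduced sketch of the callWrap pattern above: time a Callable against the Spectator clock and record the duration on a timer tagged with the request name. DefaultRegistry is used only to keep the example self-contained; in the connector the Registry comes from the server context.
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import java.util.Collections;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
public final class TimedCallSketch {
    private final Registry registry = new DefaultRegistry();
    private final Id requestTimerId = registry.createId("metacat.hive.timer.embeddedclient.requests");
    <R> R timed(final String requestName, final Callable<R> work) throws Exception {
        final long start = registry.clock().wallTime();
        try {
            return work.call();
        } finally {
            final long duration = registry.clock().wallTime() - start;
            registry.timer(requestTimerId.withTags(Collections.singletonMap("request", requestName)))
                .record(duration, TimeUnit.MILLISECONDS);
        }
    }
    public static void main(final String[] args) throws Exception {
        final TimedCallSketch sketch = new TimedCallSketch();
        final Integer result = sketch.timed("getAllDatabases", () -> 42);
        System.out.println(result);
    }
}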
| 9,557 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/HivePersistenceManagerFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.embedded;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
import javax.jdo.PersistenceManagerFactory;
import javax.sql.DataSource;
import java.util.Map;
/**
* HivePersistenceManagerFactory.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public final class HivePersistenceManagerFactory {
private static Map<String, PersistenceManagerFactory> factories = Maps.newConcurrentMap();
private HivePersistenceManagerFactory() {
}
/**
* getPersistenceManagerFactory.
*
* @param props props
* @return PersistenceManagerFactory
*/
public static PersistenceManagerFactory getPersistenceManagerFactory(final Map props) {
final String name = String.valueOf(props.get(HiveConfigConstants.JAVAX_JDO_OPTION_NAME));
PersistenceManagerFactory result = factories.get(name);
if (result == null) {
result = createPersistenceManagerFactory(props);
}
return result;
}
private static synchronized PersistenceManagerFactory createPersistenceManagerFactory(final Map props) {
final String name = String.valueOf(props.get(HiveConfigConstants.JAVAX_JDO_OPTION_NAME));
PersistenceManagerFactory result = factories.get(name);
if (result == null) {
final DataSource dataSource = DataSourceManager.get().get(name);
final Map<String, Object> properties = Maps.newHashMap();
properties.put(HiveConfigConstants.DATANUCLEUS_FIXEDDATASTORE,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_FIXEDDATASTORE, true));
properties.put(HiveConfigConstants.DATANUCLEUS_AUTOCREATESCHEMA,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_AUTOCREATESCHEMA, false));
properties.put(HiveConfigConstants.DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS, false));
properties.put(HiveConfigConstants.DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO, "None"));
properties.put(HiveConfigConstants.DATANUCLEUS_IDENTIFIERFACTORY,
HiveConfigConstants.DATANUCLEUS_DATANUCLEU1);
properties.put(HiveConfigConstants.DATANUCLEUS_CONNECTIONFACTORY, dataSource);
properties.put(HiveConfigConstants.DATANUCLEUS_RDBMS_USELEGACYNATIVEVALUESTRATEGY, true);
properties.put(HiveConfigConstants.DATANUCLEUS_TRANSACTIONISOLATION,
HiveConfigConstants.DATANUCLEUS_READCOMMITTED);
properties.put(HiveConfigConstants.DATANUCLEUS_VALIDATETABLE, false);
properties.put(HiveConfigConstants.DATANUCLEUS_VALIDATECONSTRAINTS, false);
properties.put(HiveConfigConstants.DATANUCLEUS_VALIDATECOLUMNS, false);
properties.put(HiveConfigConstants.DATANUCLEUS_CACHE_LEVEL2, false);
properties.put(HiveConfigConstants.DATANUCLEUS_CACHE_LEVEL2_TYPE, "none");
properties.put(HiveConfigConstants.DATANUCLEUS_PERSISTENCYBYREACHATCOMMIT, false);
properties.put(HiveConfigConstants.DATANUCLEUS_AUTOSTARTMECHANISMMODE, "Checked");
properties.put(HiveConfigConstants.DATANUCLEUS_DETACHALLONCOMMIT, true);
properties.put(HiveConfigConstants.DATANUCLEUS_DETACHALLONROLLBACK, true);
properties.put(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT,
props.get(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT));
properties.put(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT,
props.get(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT));
properties.put(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT,
props.get(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT));
result = JDOPersistenceManagerFactory.getPersistenceManagerFactory(properties);
factories.put(name, result);
}
return result;
}
}
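/*
 * Usage sketch (illustrative, not part of the original source). It assumes that DataSourceManager
 * has already registered a data source under the hypothetical name "hive-metastore-ds", which is
 * then selected through HiveConfigConstants.JAVAX_JDO_OPTION_NAME.
 */
class HivePersistenceManagerFactoryUsageSketch {
    static PersistenceManagerFactory obtainFactory() {
        final Map<String, Object> props = Maps.newHashMap();
        // the option name picks the data source previously registered with DataSourceManager
        props.put(HiveConfigConstants.JAVAX_JDO_OPTION_NAME, "hive-metastore-ds");
        return HivePersistenceManagerFactory.getPersistenceManagerFactory(props);
    }
}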
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/package-info.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Embedded hive metastore client implementation.
* @author zhenl
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.client.embedded;
import javax.annotation.ParametersAreNonnullByDefault;
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/HivePrivilege.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.embedded;
/**
* HivePrivilege.
*
* @author zhenl
* @since 1.0.0
*/
public enum HivePrivilege {
/**SELECT.*/
SELECT,
/**INSERT.*/
INSERT,
/**UPDATE.*/
UPDATE,
/**DELETE.*/
DELETE,
/**OWNERSHIP.*/
OWNERSHIP,
/**GRANT.*/
GRANT;
}
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/HMSHandlerProxy.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import com.google.common.base.Throwables;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.spectator.api.Registry;
import lombok.NoArgsConstructor;
import lombok.Setter;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Deadline;
import org.apache.hadoop.hive.metastore.api.MetaException;
import javax.jdo.JDODataStoreException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.concurrent.TimeUnit;
/**
* HMSHandlerProxy: a dynamic proxy around {@link MetacatHMSHandler} that runs every call under a
* Deadline timer and rethrows any underlying JDO data store exception directly.
*
* @author zhenl
* @since 1.0.0
*/
@NoArgsConstructor
public final class HMSHandlerProxy implements InvocationHandler {
@Setter
private MetacatHMSHandler metacatHMSHandler;
private long timeout = 600000; //600s
private HMSHandlerProxy(final HiveConf hiveConf, final Registry registry) throws MetaException {
metacatHMSHandler =
new MetacatHMSHandler(HiveConfigConstants.HIVE_HMSHANDLER_NAME, hiveConf, registry, false);
metacatHMSHandler.init();
timeout = HiveConf.getTimeVar(hiveConf,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
}
/**
* getProxy.
*
* @param hiveConf hive configuration
* @param registry registry
* @return a dynamic proxy implementing {@link IMetacatHMSHandler}
* @throws Exception Exception
*/
public static IMetacatHMSHandler getProxy(final HiveConf hiveConf, final Registry registry)
throws Exception {
final HMSHandlerProxy handler = new HMSHandlerProxy(hiveConf, registry);
return (IMetacatHMSHandler) Proxy.newProxyInstance(
HMSHandlerProxy.class.getClassLoader(),
new Class[]{IMetacatHMSHandler.class}, handler);
}
@Override
public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
Deadline.registerIfNot(timeout);
try {
Deadline.startTimer(method.getName());
final Object object = method.invoke(metacatHMSHandler, args);
Deadline.stopTimer();
return object;
} catch (InvocationTargetException e) {
for (Throwable ex : Throwables.getCausalChain(e)) {
if (ex instanceof JDODataStoreException) {
throw ex;
}
}
throw e.getCause();
}
}
}
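/*
 * Usage sketch (illustrative, not part of the original source): building the proxied handler. The
 * plain HiveConf and the NoopRegistry below are placeholders; callers supply their own instances.
 */
class HMSHandlerProxyUsageSketch {
    static IMetacatHMSHandler create() throws Exception {
        // every method invoked on the returned proxy runs under the Deadline timer set up in invoke()
        return HMSHandlerProxy.getProxy(new HiveConf(), new com.netflix.spectator.api.NoopRegistry());
    }
}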
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/IMetacatHMSHandler.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import java.util.List;
/**
* IMetacatHMSHandler.
* @author zhenl
* @since 1.0.0
*/
public interface IMetacatHMSHandler extends IHMSHandler {
/**
* Adds and drops partitions in one transaction.
*
* @param databaseName database name
* @param tableName table name
* @param addParts list of partitions
* @param dropParts list of partition values
* @param deleteData if true, deletes the data
* @return true if successful
* @throws TException any internal exception
*/
@SuppressWarnings({"checkstyle:methodname"})
boolean add_drop_partitions(String databaseName,
String tableName, List<Partition> addParts,
List<List<String>> dropParts, boolean deleteData)
throws TException;
}
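/*
 * Call sketch (illustrative, not part of the original source). The handler, database, table and
 * partition arguments are placeholders; the snippet only shows the shape of an atomic swap.
 */
class IMetacatHMSHandlerCallSketch {
    static boolean swapPartition(final IMetacatHMSHandler handler,
                                 final Partition newPartition,
                                 final List<String> oldPartitionValues) throws TException {
        // adds newPartition and drops the partition identified by oldPartitionValues in one transaction
        return handler.add_drop_partitions("hypothetical_db", "hypothetical_table",
                java.util.Collections.singletonList(newPartition),
                java.util.Collections.singletonList(oldPartitionValues), false);
    }
}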
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/HiveMetaStoreFsImpl.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.MetaStoreFS;
import org.apache.hadoop.hive.metastore.api.MetaException;
/**
* A no-op {@link MetaStoreFS} implementation: directory deletion requests from the metastore are
* logged and skipped, so dropping metadata never removes the underlying data.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveMetaStoreFsImpl implements MetaStoreFS {
@Override
public boolean deleteDir(final FileSystem fileSystem, final Path path,
final boolean b, final boolean b2, final Configuration entries)
throws MetaException {
log.info("No-op call for deleting '{}'", path);
return true;
}
}
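/*
 * Configuration sketch (illustrative, not part of the original source). The property name below is
 * believed to be the standard Hive metastore FS-handler key; treat both the key and the wiring as
 * assumptions about the target Hive version rather than as part of this class.
 */
class HiveMetaStoreFsImplConfigSketch {
    static Configuration withNoOpFsHandler(final Configuration conf) {
        // points the metastore at the no-op handler so that dropping metadata never deletes data
        conf.set("hive.metastore.fs.handler.class",
                "com.netflix.metacat.connector.hive.metastore.HiveMetaStoreFsImpl");
        return conf;
    }
}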
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/MetacatHMSHandler.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.metrics.Metrics;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionContext;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.RawStoreProxy;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
/**
* An extension of the Hive metastore handler that adds multi-tenancy support to the hive metastore.
*
* @author amajumdar
* @since 1.0.0
*/
public class MetacatHMSHandler extends HiveMetaStore.HMSHandler implements IMetacatHMSHandler {
private Pattern partitionValidationPattern;
private int nextSerialNum;
private final Registry registry;
private ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
@Override
protected synchronized Integer initialValue() {
return nextSerialNum++;
}
};
private final ThreadLocal<RawStore> threadLocalMS =
new ThreadLocal<RawStore>() {
@Override
protected synchronized RawStore initialValue() {
return null;
}
};
private final ThreadLocal<Configuration> threadLocalConf =
new ThreadLocal<Configuration>() {
@Override
protected synchronized Configuration initialValue() {
return null;
}
};
/**
* Constructor.
*
* @param name client name
* @throws MetaException exception
*/
public MetacatHMSHandler(final String name) throws MetaException {
this(name, new HiveConf(HiveMetaStore.HMSHandler.class));
}
/**
* Constructor.
*
* @param name client name
* @param conf hive configurations
* @throws MetaException exception
*/
public MetacatHMSHandler(final String name, final HiveConf conf) throws MetaException {
this(name, conf, new NoopRegistry(), true);
}
/**
* Constructor.
*
* @param name client name
* @param conf hive configurations
* @param registry registry
* @param init initialize if true.
* @throws MetaException exception
*/
public MetacatHMSHandler(final String name, final HiveConf conf, final Registry registry, final boolean init)
throws MetaException {
super(name, conf, init);
this.registry = registry;
final String partitionValidationRegex =
getHiveConf().getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN);
if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
partitionValidationPattern = Pattern.compile(partitionValidationRegex);
} else {
partitionValidationPattern = null;
}
}
@Override
public RawStore getMS() throws MetaException {
RawStore ms = threadLocalMS.get();
if (ms == null) {
ms = newRawStore();
ms.verifySchema();
threadLocalMS.set(ms);
ms = threadLocalMS.get();
}
return ms;
}
@Override
public void setConf(final Configuration conf) {
threadLocalConf.set(conf);
final RawStore ms = threadLocalMS.get();
if (ms != null) {
ms.setConf(conf); // reload if DS related configuration is changed
}
}
@Override
public Configuration getConf() {
Configuration conf = threadLocalConf.get();
if (conf == null) {
conf = new Configuration(getHiveConf());
threadLocalConf.set(conf);
}
return conf;
}
private RawStore newRawStore() throws MetaException {
final Configuration conf = getConf();
final String rawStoreClassName = getHiveConf().getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
LOG.info(String.format("%s: Opening raw store with implemenation class: %s", threadLocalId.get(),
rawStoreClassName));
return RawStoreProxy.getProxy(getHiveConf(), conf, rawStoreClassName, threadLocalId.get());
}
private void logInfo(final String m) {
LOG.info(threadLocalId.get().toString() + ": " + m);
}
private String startFunction(final String function, final String extraLogInfo) {
incrementCounter(function);
logInfo((getIpAddress() == null ? "" : "source:" + getIpAddress() + " ") + function + extraLogInfo);
try {
Metrics.startScope(function);
} catch (IOException e) {
LOG.debug("Exception when starting metrics scope"
+ e.getClass().getName() + " " + e.getMessage(), e);
}
return function;
}
private String startFunction(final String function) {
return startFunction(function, "");
}
private void endFunction(final String function, final boolean successful, final Exception e,
final String inputTableName) {
endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
}
private void endFunction(final String function, final MetaStoreEndFunctionContext context) {
try {
Metrics.endScope(function);
} catch (IOException e) {
LOG.debug("Exception when closing metrics scope" + e);
}
}
private static MetaException newMetaException(final Exception e) {
final MetaException me = new MetaException(e.toString());
me.initCause(e);
return me;
}
private static class PartValEqWrapper {
private Partition partition;
/**
* Constructor.
*
* @param partition partition
*/
PartValEqWrapper(final Partition partition) {
this.partition = partition;
}
@Override
public int hashCode() {
return partition.isSetValues() ? partition.getValues().hashCode() : 0;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null || !(obj instanceof PartValEqWrapper)) {
return false;
}
final Partition p1 = this.partition;
final Partition p2 = ((PartValEqWrapper) obj).partition;
if (!p1.isSetValues() || !p2.isSetValues()) {
return p1.isSetValues() == p2.isSetValues();
}
if (p1.getValues().size() != p2.getValues().size()) {
return false;
}
for (int i = 0; i < p1.getValues().size(); ++i) {
final String v1 = p1.getValues().get(i);
final String v2 = p2.getValues().get(i);
if (!Objects.equals(v1, v2)) {
return false;
}
}
return true;
}
}
/**
* Adds and drops partitions in one transaction.
*
* @param databaseName database name
* @param tableName table name
* @param addParts list of partitions
* @param dropParts list of partition values
* @param deleteData if true, deletes the data
* @return true if successful
* @throws NoSuchObjectException Exception if the table does not exist
* @throws MetaException Exception if an internal metastore error occurs
* @throws TException any internal exception
*/
@SuppressWarnings({"checkstyle:methodname"})
public boolean add_drop_partitions(final String databaseName,
final String tableName, final List<Partition> addParts,
final List<List<String>> dropParts, final boolean deleteData)
throws NoSuchObjectException, MetaException, TException {
startFunction("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
if (addParts.size() == 0 && dropParts.size() == 0) {
return true;
}
for (List<String> partVals : dropParts) {
LOG.info("Drop Partition values:" + partVals);
}
for (Partition part : addParts) {
LOG.info("Add Partition values:" + part);
}
boolean ret = false;
Exception ex = null;
try {
ret = addDropPartitionsCore(getMS(), databaseName, tableName, addParts, dropParts, false, null);
} catch (Exception e) {
ex = e;
if (e instanceof MetaException) {
throw (MetaException) e;
} else if (e instanceof InvalidObjectException) {
throw (InvalidObjectException) e;
} else if (e instanceof AlreadyExistsException) {
throw (AlreadyExistsException) e;
} else if (e instanceof NoSuchObjectException) {
throw (NoSuchObjectException) e;
} else {
throw newMetaException(e);
}
} finally {
endFunction("drop_partitions", ret, ex, tableName);
}
return ret;
}
private boolean addDropPartitionsCore(
final RawStore ms, final String databaseName, final String tableName, final List<Partition> addParts,
final List<List<String>> dropParts, final boolean ifNotExists, final EnvironmentContext envContext)
throws MetaException, InvalidObjectException, NoSuchObjectException, AlreadyExistsException,
IOException, InvalidInputException, TException {
logInfo("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
boolean success = false;
Table tbl = null;
// Ensures that the list doesn't have dups, and keeps track of directories we have created.
final Map<PartValEqWrapper, Boolean> addedPartitions = new HashMap<PartValEqWrapper, Boolean>();
final List<Partition> existingParts = new ArrayList<Partition>();
List<Partition> result = null;
try {
ms.openTransaction();
tbl = get_table(databaseName, tableName);
if (tbl == null) {
throw new NoSuchObjectException("Unable to add partitions because "
+ "database or table " + databaseName + "." + tableName + " does not exist");
}
// Drop the parts first
dropPartitionsCoreNoTxn(ms, tbl, dropParts);
// Now add the parts
result = addPartitionsCoreNoTxn(ms, tbl, addParts, ifNotExists, addedPartitions, existingParts);
if (!result.isEmpty() && !ms.addPartitions(databaseName, tableName, result)) {
throw new MetaException("Unable to add partitions");
}
success = ms.commitTransaction();
} finally {
if (!success) {
ms.rollbackTransaction();
// Clean up the result of adding partitions
for (Map.Entry<PartValEqWrapper, Boolean> e : addedPartitions.entrySet()) {
if (e.getValue()) {
getWh().deleteDir(new Path(e.getKey().partition.getSd().getLocation()), true);
// we just created this directory - it's not a case of pre-creation, so we nuke
}
}
}
}
return success;
}
private boolean startAddPartition(
final RawStore ms, final Partition part, final boolean ifNotExists) throws MetaException, TException {
MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
partitionValidationPattern);
final boolean doesExist = ms.doesPartitionExist(
part.getDbName(), part.getTableName(), part.getValues());
if (doesExist && !ifNotExists) {
throw new AlreadyExistsException("Partition already exists: " + part);
}
return !doesExist;
}
/**
* Handles the location for a partition being created.
*
* @param tbl Table.
* @param part Partition.
* @return Whether the partition SD location is set to a newly created directory.
*/
private boolean createLocationForAddedPartition(
final Table tbl, final Partition part) throws MetaException {
Path partLocation = null;
String partLocationStr = null;
if (part.getSd() != null) {
partLocationStr = part.getSd().getLocation();
}
if (partLocationStr == null || partLocationStr.isEmpty()) {
// set default location if not specified and this is
// a physical table partition (not a view)
if (tbl.getSd().getLocation() != null) {
partLocation = new Path(tbl.getSd().getLocation(), Warehouse
.makePartName(tbl.getPartitionKeys(), part.getValues()));
}
} else {
if (tbl.getSd().getLocation() == null) {
throw new MetaException("Cannot specify location for a view partition");
}
partLocation = getWh().getDnsPath(new Path(partLocationStr));
}
boolean result = false;
if (partLocation != null) {
part.getSd().setLocation(partLocation.toString());
final boolean doFileSystemCalls = getHiveConf().getBoolean("hive.metastore.use.fs.calls", true)
|| (tbl.getParameters() != null && Boolean.parseBoolean(tbl.getParameters()
.getOrDefault("hive.metastore.use.fs.calls", "false")));
if (doFileSystemCalls) {
// Check to see if the directory already exists before calling
// mkdirs() because if the file system is read-only, mkdirs will
// throw an exception even if the directory already exists.
if (!getWh().isDir(partLocation)) {
//
// Added to track the number of partition locations that do not exist before
// adding the partition metadata
registry.counter(HiveMetrics.CounterHivePartitionPathIsNotDir.getMetricName(),
"database", tbl.getDbName(), "table", tbl.getTableName()).increment();
logInfo(String.format("Partition location %s does not exist for table %s",
partLocation, tbl.getTableName()));
if (!getWh().mkdirs(partLocation, true)) {
throw new MetaException(partLocation + " is not a directory or unable to create one");
}
}
result = true;
}
}
return result;
}
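    /*
     * Illustrative example (hypothetical values): for a table located at
     * "s3://bucket/warehouse/db.db/tbl" with partition key "dateint" and partition value
     * "2024-01-01", the derived default location is
     * "s3://bucket/warehouse/db.db/tbl/dateint=2024-01-01"; the directory is only checked and
     * created when file-system calls are enabled via "hive.metastore.use.fs.calls".
     */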
private void initializeAddedPartition(
final Table tbl, final Partition part, final boolean madeDir) throws MetaException {
initializeAddedPartition(tbl, new PartitionSpecProxy.SimplePartitionWrapperIterator(part), madeDir);
}
@SuppressFBWarnings
private void initializeAddedPartition(
final Table tbl, final PartitionSpecProxy.PartitionIterator part,
final boolean madeDir) throws MetaException {
// set create time
final long time = System.currentTimeMillis() / 1000;
part.setCreateTime((int) time);
if (part.getParameters() == null || part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
}
// Inherit table properties into partition properties.
final Map<String, String> tblParams = tbl.getParameters();
final String inheritProps = getHiveConf().getVar(HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim();
// Default value is empty string in which case no properties will be inherited.
// * implies all properties need to be inherited
Set<String> inheritKeys = new HashSet<String>(Arrays.asList(inheritProps.split(",")));
if (inheritKeys.contains("*")) {
inheritKeys = tblParams.keySet();
}
for (String key : inheritKeys) {
final String paramVal = tblParams.get(key);
if (null != paramVal) { // add the property only if it exists in table properties
part.putToParameters(key, paramVal);
}
}
}
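    /*
     * Worked example (illustrative): with METASTORE_PART_INHERIT_TBL_PROPS set to the hypothetical
     * value "owner,retention.days", only those two table parameters are copied onto each new
     * partition; the value "*" makes inheritKeys equal to tblParams.keySet() so every table
     * parameter is inherited; the default empty string splits to {""}, which matches nothing, so
     * no parameters are inherited.
     */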
private List<Partition> addPartitionsCoreNoTxn(
final RawStore ms, final Table tbl, final List<Partition> parts, final boolean ifNotExists,
final Map<PartValEqWrapper, Boolean> addedPartitions, final List<Partition> existingParts)
throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
logInfo("add_partitions");
final String dbName = tbl.getDbName();
final String tblName = tbl.getTableName();
final List<Partition> result = new ArrayList<Partition>();
for (Partition part : parts) {
if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + part);
}
final boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
if (!shouldAdd) {
existingParts.add(part);
LOG.info("Not adding partition " + part + " as it already exists");
continue;
}
final boolean madeDir = createLocationForAddedPartition(tbl, part);
if (addedPartitions.put(new PartValEqWrapper(part), madeDir) != null) {
// Technically, for ifNotExists case, we could insert one and discard the other
// because the first one now "exists", but it seems better to report the problem
// upstream as such a command doesn't make sense.
throw new MetaException("Duplicate partitions in the list: " + part);
}
initializeAddedPartition(tbl, part, madeDir);
result.add(part);
}
return result;
}
private List<Partition> dropPartitionsCoreNoTxn(
final RawStore ms, final Table tbl, final List<List<String>> partsValues)
throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
final List<Partition> deletedPartitions = new ArrayList<Partition>();
Partition part = null;
final String dbName = tbl.getDbName();
final String tblName = tbl.getTableName();
for (List<String> partValues : partsValues) {
part = ms.getPartition(dbName, tblName, partValues);
if (part == null) {
throw new NoSuchObjectException("Partition doesn't exist. "
+ partValues);
}
if (!ms.dropPartition(dbName, tblName, partValues)) {
throw new MetaException("Unable to drop partition");
}
deletedPartitions.add(part);
}
return deletedPartitions;
}
}
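/*
 * Construction sketch (illustrative, not part of the original source): standing up the handler with
 * a local HiveConf and issuing an atomic add/drop. The database, table and partition arguments are
 * placeholders, and production callers are expected to go through HMSHandlerProxy instead.
 */
class MetacatHMSHandlerUsageSketch {
    static boolean swapPartitions(final List<Partition> toAdd,
                                  final List<List<String>> toDrop) throws Exception {
        final MetacatHMSHandler handler = new MetacatHMSHandler("usage-sketch", new HiveConf());
        return handler.add_drop_partitions("hypothetical_db", "hypothetical_table", toAdd, toDrop, false);
    }
}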
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/package-info.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes to extend hive metastore.
*
* @author amajumdar
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.metastore;
import javax.annotation.ParametersAreNonnullByDefault;
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastPartitionService.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.metacat.connector.hive.util.PartitionUtil;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* HiveConnectorFastPartitionService.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorFastPartitionService extends HiveConnectorPartitionService {
private DirectSqlGetPartition directSqlGetPartition;
private DirectSqlSavePartition directSqlSavePartition;
private Warehouse warehouse;
private Registry registry;
@VisibleForTesting
private IcebergTableHandler icebergTableHandler;
/**
* Constructor.
*
* @param context connector context
* @param metacatHiveClient hive client
* @param warehouse hive warehouse
* @param hiveMetacatConverters hive converter
* @param directSqlGetPartition service to get partitions
* @param directSqlSavePartition service to save partitions
* @param icebergTableHandler iceberg table handler
*/
public HiveConnectorFastPartitionService(
final ConnectorContext context,
final IMetacatHiveClient metacatHiveClient,
final Warehouse warehouse,
final HiveConnectorInfoConverter hiveMetacatConverters,
final DirectSqlGetPartition directSqlGetPartition,
final DirectSqlSavePartition directSqlSavePartition,
final IcebergTableHandler icebergTableHandler
) {
super(context, metacatHiveClient, hiveMetacatConverters);
this.warehouse = warehouse;
this.directSqlGetPartition = directSqlGetPartition;
this.directSqlSavePartition = directSqlSavePartition;
this.registry = context.getRegistry();
this.icebergTableHandler = icebergTableHandler;
}
/**
* Number of partitions for the given table.
*
* @param requestContext request context
* @param tableName table name
* @param tableInfo table info
* @return number of partitions
*/
@Override
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final TableInfo tableInfo
) {
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
throw new UnsupportedOperationException("IcebergTable Unsupported Operation!");
}
return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
? getIcebergPartitionInfos(tableInfo, partitionsRequest)
: directSqlGetPartition.getPartitions(requestContext, tableName, partitionsRequest);
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
? getIcebergPartitionInfos(tableInfo, partitionsRequest)
.stream().map(info -> info.getName().getPartitionName()).collect(Collectors.toList())
:
directSqlGetPartition.getPartitionKeys(requestContext, tableName, partitionsRequest);
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionUris(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo
) {
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
throw new UnsupportedOperationException("IcebergTable Unsupported Operation!");
}
return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
* Returns the names of the partitions referring to the given uris.
*
* @param context connector request context
* @param uris locations
* @param prefixSearch if true, look for locations that start with the given uris
* @return map of uri to list of partition names
*/
@Override
public Map<String, List<QualifiedName>> getPartitionNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
return directSqlGetPartition.getPartitionNames(context, uris, prefixSearch);
}
@Override
protected Map<String, PartitionHolder> getPartitionsByNames(final Table table, final List<String> partitionNames) {
// This is an internal call; always turn off the audit table processing.
return directSqlGetPartition.getPartitionHoldersByNames(table, partitionNames, true);
}
protected void addUpdateDropPartitions(final QualifiedName tableQName,
final Table table,
final List<String> partitionNames,
final List<PartitionInfo> addedPartitionInfos,
final List<PartitionHolder> existingPartitionHolders,
final Set<String> deletePartitionNames) {
final boolean useHiveFastServiceForSavePartitions = Boolean.parseBoolean(getContext().getConfiguration()
.getOrDefault("hive.use.embedded.sql.save.partitions", "false"))
|| (table.getParameters() != null && Boolean.parseBoolean(table.getParameters()
.getOrDefault("hive.use.embedded.sql.save.partitions", "false")));
if (useHiveFastServiceForSavePartitions) {
final long start = registry.clock().wallTime();
try {
if (!existingPartitionHolders.isEmpty()) {
final List<PartitionInfo> existingPartitionInfos = existingPartitionHolders.stream()
.map(PartitionHolder::getPartitionInfo).collect(Collectors.toList());
copyTableSdToPartitionInfosSd(existingPartitionInfos, table);
createLocationForPartitions(tableQName, existingPartitionInfos, table);
}
copyTableSdToPartitionInfosSd(addedPartitionInfos, table);
createLocationForPartitions(tableQName, addedPartitionInfos, table);
} finally {
registry.timer(registry
.createId(HiveMetrics.TagCreatePartitionLocations.getMetricName()).withTags(tableQName.parts()))
.record(registry.clock().wallTime() - start, TimeUnit.MILLISECONDS);
}
directSqlSavePartition.addUpdateDropPartitions(tableQName, table, addedPartitionInfos,
existingPartitionHolders, deletePartitionNames);
} else {
super.addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos,
existingPartitionHolders, deletePartitionNames);
}
}
private void createLocationForPartitions(final QualifiedName tableQName,
final List<PartitionInfo> partitionInfos, final Table table) {
final boolean doFileSystemCalls = Boolean.parseBoolean(getContext().getConfiguration()
.getOrDefault("hive.metastore.use.fs.calls", "true"))
|| (table.getParameters() != null && Boolean.parseBoolean(table.getParameters()
.getOrDefault("hive.metastore.use.fs.calls", "false")));
partitionInfos.forEach(partitionInfo ->
createLocationForPartition(tableQName, partitionInfo, table, doFileSystemCalls));
}
private void createLocationForPartition(final QualifiedName tableQName,
final PartitionInfo partitionInfo,
final Table table,
final boolean doFileSystemCalls) {
String location = partitionInfo.getSerde().getUri();
Path path = null;
if (StringUtils.isBlank(location)) {
if (table.getSd() == null || table.getSd().getLocation() == null) {
throw new InvalidMetaException(tableQName, null);
}
final String partitionName = partitionInfo.getName().getPartitionName();
final List<String> partValues = PartitionUtil
.getPartValuesFromPartName(tableQName, table, partitionName);
final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
path = new Path(table.getSd().getLocation(), escapedPartName);
} else {
try {
path = warehouse.getDnsPath(new Path(location));
} catch (Exception e) {
throw new InvalidMetaException(String.format("Failed forming partition location; %s", location), e);
}
}
if (path != null) {
location = path.toString();
partitionInfo.getSerde().setUri(location);
if (doFileSystemCalls) {
registry.counter(registry.createId(HiveMetrics.CounterHivePartitionFileSystemCall.getMetricName())
.withTags(tableQName.parts())).increment();
try {
if (!warehouse.isDir(path)) {
//
// Added to track the number of partition locations that do not exist before
// adding the partition metadata
registry.counter(registry.createId(HiveMetrics.CounterHivePartitionPathIsNotDir.getMetricName())
.withTags(tableQName.parts())).increment();
log.info(String.format("Partition location %s does not exist for table %s",
location, tableQName));
if (!warehouse.mkdirs(path, false)) {
throw new InvalidMetaException(String
.format("%s is not a directory or unable to create one", location), null);
}
}
} catch (Exception e) {
throw new InvalidMetaException(String.format("Failed creating partition location; %s", location),
e);
}
}
}
}
private void copyTableSdToPartitionInfosSd(final List<PartitionInfo> partitionInfos, final Table table) {
//
// Update the partition info based on that of the table.
//
for (PartitionInfo partitionInfo : partitionInfos) {
copyTableSdToPartitionInfoSd(partitionInfo, table);
}
}
private void copyTableSdToPartitionInfoSd(final PartitionInfo partitionInfo, final Table table) {
StorageInfo sd = partitionInfo.getSerde();
//
// Partitions can be provided in the request without the storage information.
//
if (sd == null) {
sd = new StorageInfo();
partitionInfo.setSerde(sd);
}
final StorageDescriptor tableSd = table.getSd();
if (StringUtils.isBlank(sd.getInputFormat())) {
sd.setInputFormat(tableSd.getInputFormat());
}
if (StringUtils.isBlank(sd.getOutputFormat())) {
sd.setOutputFormat(tableSd.getOutputFormat());
}
if (sd.getParameters() == null || sd.getParameters().isEmpty()) {
sd.setParameters(tableSd.getParameters());
}
final SerDeInfo tableSerde = tableSd.getSerdeInfo();
if (tableSerde != null) {
if (StringUtils.isBlank(sd.getSerializationLib())) {
sd.setSerializationLib(tableSerde.getSerializationLib());
}
if (sd.getSerdeInfoParameters() == null || sd.getSerdeInfoParameters().isEmpty()) {
sd.setSerdeInfoParameters(tableSerde.getParameters());
}
}
}
/**
* {@inheritDoc}.
*/
@Override
public void deletePartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final List<String> partitionNames,
final TableInfo tableInfo
) {
//TODO: to be implemented as a next step
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
throw new UnsupportedOperationException("IcebergTable Unsupported Operation!");
}
//The direct sql based deletion doesn't check if the partition is valid
if (Boolean.parseBoolean(getContext().getConfiguration()
.getOrDefault(HiveConfigConstants.USE_FAST_DELETION, "false"))) {
directSqlSavePartition.delete(tableName, partitionNames);
} else {
//will throw exception if the partitions are invalid
super.deletePartitions(requestContext, tableName, partitionNames, tableInfo);
}
}
/**
* Gets the iceberg table partition infos.
*
* @param tableInfo table info
* @param partitionsRequest partition request
* @return paginated list of iceberg partition infos
*/
private List<PartitionInfo> getIcebergPartitionInfos(
final TableInfo tableInfo,
final PartitionListRequest partitionsRequest) {
return ConnectorUtils.paginate(
icebergTableHandler.getPartitions(
tableInfo,
context,
partitionsRequest.getFilter(),
partitionsRequest.getPartitionNames(),
partitionsRequest.getSort()
),
partitionsRequest.getPageable()
);
}
}
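/*
 * Configuration sketch (illustrative, not part of the original source): the keys below are the ones
 * this class reads from the connector configuration or from table parameters; the values shown are
 * hypothetical and only illustrate how the fast SQL paths are switched on.
 */
class HiveConnectorFastPartitionServiceConfigSketch {
    static Map<String, String> fastPathConfiguration() {
        final Map<String, String> conf = new java.util.HashMap<>();
        conf.put("hive.use.embedded.sql.save.partitions", "true"); // direct SQL partition saves
        conf.put("hive.metastore.use.fs.calls", "true");           // create partition directories on add
        conf.put(HiveConfigConstants.USE_FAST_DELETION, "true");   // direct SQL partition deletes
        return conf;
    }
}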
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastDatabaseService.java
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
/**
* This class provides the database service using direct sql.
*
* @author amajumdar
* @since 1.3.0
*/
public class HiveConnectorFastDatabaseService extends HiveConnectorDatabaseService {
private final DirectSqlDatabase directSqlDatabase;
/**
* Constructor.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
* @param directSqlDatabase database sql data service
*/
public HiveConnectorFastDatabaseService(final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final DirectSqlDatabase directSqlDatabase) {
super(metacatHiveClient, hiveMetacatConverters);
this.directSqlDatabase = directSqlDatabase;
}
@Override
public void update(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
directSqlDatabase.update(databaseInfo);
}
}
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlTable.java
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to get/set table metadata.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlTable {
/**
* Defines the table type.
*/
public static final String PARAM_TABLE_TYPE = "table_type";
/**
* Defines the current metadata location of the iceberg table.
*/
public static final String PARAM_METADATA_LOCATION = "metadata_location";
/**
* Defines the previous metadata location of the iceberg table.
*/
public static final String PARAM_PREVIOUS_METADATA_LOCATION = "previous_metadata_location";
/**
* Defines the current partition spec expression of the iceberg table.
*/
public static final String PARAM_PARTITION_SPEC = "partition_spec";
/**
* Iceberg table type.
*/
public static final String ICEBERG_TABLE_TYPE = "ICEBERG";
/**
* VIRTUAL_VIEW table type.
*/
public static final String VIRTUAL_VIEW_TABLE_TYPE = "VIRTUAL_VIEW";
/**
* Defines the metadata content of the iceberg table.
*/
public static final String PARAM_METADATA_CONTENT = "metadata_content";
/**
* List of parameter that needs to be excluded when updating an iceberg table.
*/
public static final Set<String> TABLE_EXCLUDED_PARAMS =
ImmutableSet.of(PARAM_PARTITION_SPEC, PARAM_METADATA_CONTENT);
private static final String COL_PARAM_KEY = "param_key";
private static final String COL_PARAM_VALUE = "param_value";
private final Registry registry;
private final JdbcTemplate jdbcTemplate;
private final HiveConnectorFastServiceMetric fastServiceMetric;
private final String catalogName;
private final DirectSqlSavePartition directSqlSavePartition;
private final Warehouse warehouse;
private final Config config;
/**
* Constructor.
*
* @param connectorContext server context
* @param jdbcTemplate JDBC template
* @param fastServiceMetric fast service metric
* @param directSqlSavePartition direct sql partition service
* @param warehouse warehouse
*/
public DirectSqlTable(
final ConnectorContext connectorContext,
final JdbcTemplate jdbcTemplate,
final HiveConnectorFastServiceMetric fastServiceMetric,
final DirectSqlSavePartition directSqlSavePartition,
final Warehouse warehouse
) {
this.catalogName = connectorContext.getCatalogName();
this.registry = connectorContext.getRegistry();
this.jdbcTemplate = jdbcTemplate;
this.fastServiceMetric = fastServiceMetric;
this.directSqlSavePartition = directSqlSavePartition;
this.warehouse = warehouse;
this.config = connectorContext.getConfig();
}
/**
* Returns the Jdbc connection of the underlying database.
*
* @return the Jdbc connection of the underlying database
* @throws SQLException if the connection could not be fetched
* @throws NullPointerException if no data source has been configured
*/
@SuppressFBWarnings("NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE")
public Connection getConnection() throws SQLException {
return jdbcTemplate.getDataSource().getConnection();
}
/**
* Returns true if table exists with the given name.
*
* @param name table name
* @return true if table exists with the given name.
*/
@Transactional(readOnly = true)
public boolean exists(final QualifiedName name) {
final long start = registry.clock().wallTime();
boolean result = false;
try {
final Object qResult = jdbcTemplate.queryForObject(SQL.EXIST_TABLE_BY_NAME,
new String[]{name.getDatabaseName(), name.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR}, Integer.class);
if (qResult != null) {
result = true;
}
} catch (EmptyResultDataAccessException e) {
log.debug("Table {} does not exist.", name);
return false;
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagTableExists.getMetricName(), registry.clock().wallTime() - start);
}
return result;
}
/**
* Returns all the table names referring to the given <code>uris</code>.
*
* @param uris locations
* @param prefixSearch if true, we look for tables whose location starts with the given <code>uri</code>
* @return map of uri to list of table names
* @throws UnsupportedOperationException If the connector doesn't implement this method
*/
@Transactional(readOnly = true)
public Map<String, List<QualifiedName>> getTableNames(final List<String> uris, final boolean prefixSearch) {
final long start = registry.clock().wallTime();
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(SQL.GET_TABLE_NAMES_BY_URI);
final List<SqlParameterValue> params = Lists.newArrayList();
if (prefixSearch) {
queryBuilder.append(" and (1=0");
uris.forEach(uri -> {
queryBuilder.append(" or location like ?");
params.add(new SqlParameterValue(Types.VARCHAR, uri + "%"));
});
queryBuilder.append(" )");
} else {
queryBuilder.append(" and location in (");
uris.forEach(uri -> {
queryBuilder.append("?,");
params.add(new SqlParameterValue(Types.VARCHAR, uri));
});
queryBuilder.deleteCharAt(queryBuilder.length() - 1).append(")");
}
ResultSetExtractor<Map<String, List<QualifiedName>>> handler = rs -> {
final Map<String, List<QualifiedName>> result = Maps.newHashMap();
while (rs.next()) {
final String schemaName = rs.getString("schema_name");
final String tableName = rs.getString("table_name");
final String uri = rs.getString("location");
final List<QualifiedName> names = result.computeIfAbsent(uri, k -> Lists.newArrayList());
names.add(QualifiedName.ofTable(catalogName, schemaName, tableName));
}
return result;
};
try {
return jdbcTemplate.query(queryBuilder.toString(), params.toArray(), handler);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetTableNames.getMetricName(), registry.clock().wallTime() - start);
}
}
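    /*
     * Illustrative example (hypothetical values): for uris = ["s3://bucket/warehouse/db1"] and
     * prefixSearch = true, the generated SQL ends with "and (1=0 or location like ?)" bound to
     * "s3://bucket/warehouse/db1%", and the extractor returns each matching location mapped to the
     * qualified names of the tables stored at or under it.
     */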
/**
* Locks and updates the iceberg table so that no other request can modify it at the same time.
* 1. Gets the table parameters and locks the requested records. If the lock cannot be attained,
* the update request fails.
* 2. Validates the metadata location.
* 3. If validated, updates the table parameters.
* @param tableInfo table info
*/
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void updateIcebergTable(final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
final Map<String, String> newTableMetadata = tableInfo.getMetadata();
//
// Table info should have the table parameters with the metadata location.
//
HiveTableUtil.throwIfTableMetadataNullOrEmpty(tableName, newTableMetadata);
//
// If the previous metadata location is not empty, check if it is valid.
//
final String previousMetadataLocation = newTableMetadata.get(PARAM_PREVIOUS_METADATA_LOCATION);
if (config.isIcebergPreviousMetadataLocationCheckEnabled() && !StringUtils.isBlank(previousMetadataLocation)) {
boolean doesPathExists = true;
try {
final Path previousMetadataPath = new Path(previousMetadataLocation);
doesPathExists = warehouse.getFs(previousMetadataPath).exists(previousMetadataPath);
} catch (Exception ignored) {
log.warn(String.format("Failed getting the filesystem for %s", previousMetadataLocation));
registry.counter(HiveMetrics.CounterFileSystemReadFailure.name()).increment();
}
if (!doesPathExists) {
throw new InvalidMetaException(tableName,
String.format("Invalid metadata for %s..Location %s does not exist",
tableName, previousMetadataLocation), null);
}
}
final Long tableId = getTableId(tableName);
Map<String, String> existingTableMetadata = null;
log.debug("Lock Iceberg table {}", tableName);
try {
existingTableMetadata = jdbcTemplate.query(SQL.TABLE_PARAMS_LOCK,
new SqlParameterValue[]{new SqlParameterValue(Types.BIGINT, tableId)}, rs -> {
final Map<String, String> result = Maps.newHashMap();
while (rs.next()) {
result.put(rs.getString(COL_PARAM_KEY), rs.getString(COL_PARAM_VALUE));
}
return result;
});
} catch (EmptyResultDataAccessException ex) {
log.info(String.format("No parameters defined for iceberg table %s", tableName));
} catch (Exception ex) {
final String message = String.format("Failed getting a lock on iceberg table %s", tableName);
log.warn(message, ex);
throw new InvalidMetaException(tableName, message, null);
}
if (existingTableMetadata == null) {
existingTableMetadata = Maps.newHashMap();
}
final boolean needUpdate = validateIcebergUpdate(tableName, existingTableMetadata, newTableMetadata);
final String existingMetadataLocation = existingTableMetadata.get(PARAM_METADATA_LOCATION);
final String newMetadataLocation = newTableMetadata.get(PARAM_METADATA_LOCATION);
log.info("Servicing Iceberg commit request with tableId: {}, needUpdate: {}, "
+ "previousLocation: {}, existingLocation: {}, newLocation: {}",
tableId, needUpdate, previousMetadataLocation, existingMetadataLocation, newMetadataLocation);
if (needUpdate) {
final MapDifference<String, String> diff = Maps.difference(existingTableMetadata, newTableMetadata);
insertTableParams(tableId, diff.entriesOnlyOnRight());
final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
updateTableParams(tableId, updateParams);
//
// In addition to updating the table params, the table location in HMS needs to be updated for usage by
// external tools, that access HMS directly
//
updateTableLocation(tableId, tableInfo);
log.info("Finished updating Iceberg table with tableId: {}", tableId);
}
log.debug("Unlocked Iceberg table {}", tableName);
}
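    /*
     * Commit-contract sketch (illustrative, hypothetical paths): a caller supplies a TableInfo whose
     * metadata carries
     *   metadata_location          -> "s3://bucket/db/tbl/metadata/00002.metadata.json" (new)
     *   previous_metadata_location -> "s3://bucket/db/tbl/metadata/00001.metadata.json" (expected current)
     * The parameters are only rewritten when the stored metadata_location equals the supplied
     * previous location; otherwise validateIcebergUpdate raises a TablePreconditionFailedException.
     */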
private void validateTableType(final QualifiedName tableName, final Map<String, String> tableMetadata) {
if (!tableMetadata.isEmpty()) {
if (ICEBERG_TABLE_TYPE.equalsIgnoreCase(tableMetadata.get(PARAM_TABLE_TYPE))) {
return;
}
if (MetacatUtils.isCommonView(tableMetadata)) {
return;
}
}
final String message = String.format("Originally table %s is neither iceberg table nor common view", tableName);
log.info(message);
throw new InvalidMetaException(tableName, message, null);
}
private boolean validateIcebergUpdate(final QualifiedName tableName,
final Map<String, String> existingTableMetadata,
final Map<String, String> newTableMetadata) {
// Validate the type of the table stored in the RDS
validateTableType(tableName, existingTableMetadata);
final String existingMetadataLocation = existingTableMetadata.get(PARAM_METADATA_LOCATION);
final String previousMetadataLocation = newTableMetadata.get(PARAM_PREVIOUS_METADATA_LOCATION);
final String newMetadataLocation = newTableMetadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
//
// 1. If stored metadata location is empty then the table is not in a valid state.
// 2. If previous metadata location is not provided then the request is invalid.
// 3. If the provided previous metadata location does not match the saved metadata location, then the table
// update should fail.
//
boolean needUpdate = false;
if (StringUtils.isBlank(existingMetadataLocation)) {
final String message = String
.format("Invalid metadata location for iceberg table %s. Existing location is empty.",
tableName);
log.error(message);
throw new TablePreconditionFailedException(tableName, message, existingMetadataLocation,
previousMetadataLocation);
} else if (!Objects.equals(existingMetadataLocation, newMetadataLocation)) {
if (StringUtils.isBlank(previousMetadataLocation)) {
final String message = String.format(
"Invalid metadata location for iceberg table %s. Provided previous metadata location is empty.",
tableName);
log.error(message);
throw new TablePreconditionFailedException(tableName, message, existingMetadataLocation,
previousMetadataLocation);
} else if (!Objects.equals(existingMetadataLocation, previousMetadataLocation)) {
final String message =
String.format("Invalid metadata location for iceberg table %s (expected:%s, provided:%s)",
tableName, existingMetadataLocation, previousMetadataLocation);
log.error(message);
throw new TablePreconditionFailedException(tableName, message, existingMetadataLocation,
previousMetadataLocation);
}
needUpdate = true;
}
return needUpdate;
}
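    //
    // Illustrative sketch (not authoritative; the values below are hypothetical): the checks above amount to a
    // compare-and-swap on the metadata location. A client passes the location it last read as the "previous"
    // location along with the new location it wants to commit, e.g.:
    //
    //   stored (existing) location : s3://bucket/warehouse/tbl/metadata/00003.json
    //   previous (client) location : s3://bucket/warehouse/tbl/metadata/00003.json  -> matches, update allowed
    //   new (client) location      : s3://bucket/warehouse/tbl/metadata/00004.json
    //
    // If another commit lands first, the stored location no longer matches the client's previous location and a
    // TablePreconditionFailedException is thrown, so the client must refresh its metadata and retry.
    //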
private void updateTableLocation(final Long tableId, final TableInfo tableInfo) {
final String uri = tableInfo.getSerde() != null ? tableInfo.getSerde().getUri() : null;
if (!Strings.isNullOrEmpty(uri)) {
jdbcTemplate.update(SQL.UPDATE_SDS_LOCATION, new SqlParameterValue(Types.VARCHAR, uri),
new SqlParameterValue(Types.BIGINT, tableId), new SqlParameterValue(Types.VARCHAR, uri));
}
}
private void insertTableParams(final Long tableId, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.filter(s -> !TABLE_EXCLUDED_PARAMS.contains(s.getKey()))
.map(s -> new Object[]{tableId, s.getKey(), s.getValue()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.INSERT_TABLE_PARAMS, paramsList,
new int[]{Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
}
}
private void updateTableParams(final Long tableId, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.filter(s -> !TABLE_EXCLUDED_PARAMS.contains(s.getKey()))
.map(s -> new Object[]{s.getValue(), tableId, s.getKey()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.UPDATE_TABLE_PARAMS, paramsList,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
}
}
/**
* Returns the table internal id.
* @param tableName table name
* @return table id
*/
@Transactional(readOnly = true)
public Long getTableId(final QualifiedName tableName) {
try {
return jdbcTemplate.queryForObject(SQL.GET_TABLE_ID,
new String[]{tableName.getDatabaseName(), tableName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR}, Long.class);
} catch (EmptyResultDataAccessException e) {
throw new TableNotFoundException(tableName);
}
}
/**
* Deletes all the table related information from the store.
* @param tableName table name
*/
public void delete(final QualifiedName tableName) {
try {
final TableSequenceIds ids = getSequenceIds(tableName);
directSqlSavePartition.delete(tableName);
jdbcTemplate.update(SQL.UPDATE_SDS_CD, new SqlParameterValue(Types.BIGINT, null),
new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.UPDATE_SDS_SERDE, new SqlParameterValue(Types.BIGINT, null),
new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
//
            // Ignore the error when the table does not exist.
            // In certain hive metastore versions, these tables might not be present.
            // TODO: Handle these non-existent tables better.
//
try {
jdbcTemplate.update(SQL.DELETE_COLUMNS_OLD, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
} catch (DataAccessException ignored) {
log.debug("Ignore. Probably table COLUMNS_OLD does not exist.");
}
try {
jdbcTemplate.update(SQL.DELETE_TBL_PRIVS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
} catch (DataAccessException ignored) {
log.debug("Ignore. Probably table TBL_PRIVS does not exist.");
}
try {
jdbcTemplate.update(SQL.DELETE_TBL_COL_PRIVS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
} catch (DataAccessException ignored) {
log.debug("Ignore. Probably table TBL_COL_PRIVS does not exist.");
}
jdbcTemplate.update(SQL.DELETE_COLUMNS_V2, new SqlParameterValue(Types.BIGINT, ids.getCdId()));
jdbcTemplate.update(SQL.DELETE_CDS, new SqlParameterValue(Types.BIGINT, ids.getCdId()));
jdbcTemplate.update(SQL.DELETE_PARTITION_KEYS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.DELETE_TABLE_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.DELETE_TAB_COL_STATS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.UPDATE_TABLE_SD, new SqlParameterValue(Types.BIGINT, null),
new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.DELETE_SKEWED_COL_NAMES, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_BUCKETING_COLS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SORT_COLS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SD_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SKEWED_COL_VALUE_LOC_MAP,
new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SKEWED_VALUES, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SERDE_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getSerdeId()));
jdbcTemplate.update(SQL.DELETE_SERDES, new SqlParameterValue(Types.BIGINT, ids.getSerdeId()));
jdbcTemplate.update(SQL.DELETE_SDS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_TBLS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
} catch (DataAccessException e) {
throw new ConnectorException(String.format("Failed delete hive table %s", tableName), e);
}
}
private TableSequenceIds getSequenceIds(final QualifiedName tableName) {
try {
return jdbcTemplate.queryForObject(
SQL.TABLE_SEQUENCE_IDS,
new Object[]{tableName.getDatabaseName(), tableName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR},
(rs, rowNum) -> new TableSequenceIds(rs.getLong("tbl_id"), rs.getLong("cd_id"),
rs.getLong("sd_id"), rs.getLong("serde_id")));
} catch (EmptyResultDataAccessException e) {
throw new TableNotFoundException(tableName);
}
}
@VisibleForTesting
private static class SQL {
static final String GET_TABLE_NAMES_BY_URI =
"select d.name schema_name, t.tbl_name table_name, s.location"
+ " from DBS d, TBLS t, SDS s where d.DB_ID=t.DB_ID and t.sd_id=s.sd_id";
static final String EXIST_TABLE_BY_NAME =
"select 1 from DBS d join TBLS t on d.DB_ID=t.DB_ID where d.name=? and t.tbl_name=?";
static final String GET_TABLE_ID =
"select t.tbl_id from DBS d join TBLS t on d.DB_ID=t.DB_ID where d.name=? and t.tbl_name=?";
static final String TABLE_PARAM_LOCK =
"SELECT param_value FROM TABLE_PARAMS WHERE tbl_id=? and param_key=? FOR UPDATE";
static final String TABLE_PARAMS_LOCK =
"SELECT param_key, param_value FROM TABLE_PARAMS WHERE tbl_id=? FOR UPDATE";
static final String UPDATE_TABLE_PARAMS =
"update TABLE_PARAMS set param_value=? WHERE tbl_id=? and param_key=?";
static final String INSERT_TABLE_PARAMS =
"insert into TABLE_PARAMS(tbl_id,param_key,param_value) values (?,?,?)";
static final String UPDATE_SDS_LOCATION =
"UPDATE SDS s join TBLS t on s.sd_id=t.sd_id SET s.LOCATION=? WHERE t.TBL_ID=? and s.LOCATION != ?";
static final String UPDATE_SDS_CD = "UPDATE SDS SET CD_ID=? WHERE SD_ID=?";
static final String DELETE_COLUMNS_OLD = "DELETE FROM COLUMNS_OLD WHERE SD_ID=?";
static final String DELETE_COLUMNS_V2 = "DELETE FROM COLUMNS_V2 WHERE CD_ID=?";
static final String DELETE_CDS = "DELETE FROM CDS WHERE CD_ID=?";
static final String DELETE_PARTITION_KEYS = "DELETE FROM PARTITION_KEYS WHERE TBL_ID=?";
static final String DELETE_TABLE_PARAMS = "DELETE FROM TABLE_PARAMS WHERE TBL_ID=?";
static final String DELETE_TAB_COL_STATS = "DELETE FROM TAB_COL_STATS WHERE TBL_ID=?";
static final String UPDATE_TABLE_SD = "UPDATE TBLS SET SD_ID=? WHERE TBL_ID=?";
static final String DELETE_SKEWED_COL_NAMES = "DELETE FROM SKEWED_COL_NAMES WHERE SD_ID=?";
static final String DELETE_BUCKETING_COLS = "DELETE FROM BUCKETING_COLS WHERE SD_ID=?";
static final String DELETE_SORT_COLS = "DELETE FROM SORT_COLS WHERE SD_ID=?";
static final String DELETE_SD_PARAMS = "DELETE FROM SD_PARAMS WHERE SD_ID=?";
static final String DELETE_SKEWED_COL_VALUE_LOC_MAP = "DELETE FROM SKEWED_COL_VALUE_LOC_MAP WHERE SD_ID=?";
static final String DELETE_SKEWED_VALUES = "DELETE FROM SKEWED_VALUES WHERE SD_ID_OID=?";
static final String UPDATE_SDS_SERDE = "UPDATE SDS SET SERDE_ID=? WHERE SD_ID=?";
static final String DELETE_SERDE_PARAMS = "DELETE FROM SERDE_PARAMS WHERE SERDE_ID=?";
static final String DELETE_SERDES = "DELETE FROM SERDES WHERE SERDE_ID=?";
static final String DELETE_SDS = "DELETE FROM SDS WHERE SD_ID=?";
static final String DELETE_TBL_PRIVS = "DELETE FROM TBL_PRIVS WHERE TBL_ID=?";
static final String DELETE_TBL_COL_PRIVS = "DELETE FROM TBL_COL_PRIVS WHERE TBL_ID=?";
static final String DELETE_TBLS = "DELETE FROM TBLS WHERE TBL_ID=?";
static final String TABLE_SEQUENCE_IDS = "select t.tbl_id, s.sd_id, s.cd_id, s.serde_id"
+ " from DBS d join TBLS t on d.db_id=t.db_id join SDS s on t.sd_id=s.sd_id"
+ " where d.name=? and t.tbl_name=?";
}
}
| 9,568 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastTableService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.commonview.CommonViewHandler;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.spectator.api.Registry;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
/**
* HiveConnectorFastTableService.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorFastTableService extends HiveConnectorTableService {
private final Registry registry;
@Getter
private final DirectSqlTable directSqlTable;
private final IcebergTableHandler icebergTableHandler;
private final CommonViewHandler commonViewHandler;
private final HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy;
/**
* Constructor.
*
* @param catalogName catalog name
* @param metacatHiveClient hive client
* @param hiveConnectorDatabaseService databaseService
* @param hiveMetacatConverters hive converter
* @param connectorContext serverContext
* @param directSqlTable Table jpa service
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
* @param hiveConnectorFastTableServiceProxy hive connector fast table service proxy
*/
public HiveConnectorFastTableService(
final String catalogName,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final HiveConnectorInfoConverter hiveMetacatConverters,
final ConnectorContext connectorContext,
final DirectSqlTable directSqlTable,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler,
final HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy
) {
super(catalogName, metacatHiveClient, hiveConnectorDatabaseService, hiveMetacatConverters, connectorContext);
this.registry = connectorContext.getRegistry();
this.directSqlTable = directSqlTable;
this.icebergTableHandler = icebergTableHandler;
this.commonViewHandler = commonViewHandler;
this.hiveConnectorFastTableServiceProxy = hiveConnectorFastTableServiceProxy;
}
/**
* Returns the Jdbc connection of the underlying database.
*
* @return the Jdbc connection of the underlying database
     * @throws SQLException if the connection could not be fetched
*/
public Connection getConnection() throws SQLException {
return directSqlTable.getConnection();
}
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
try {
super.create(requestContext, tableInfo);
} catch (InvalidMetaException e) {
throw handleException(e);
}
}
private RuntimeException handleException(final RuntimeException e) {
//
// On table creation, hive metastore validates the table location.
// On iceberg table get and update, the iceberg method uses the metadata location.
        // On both occasions, FileSystem uses the relevant file system implementation based on the location
        // scheme. We noticed an error where the s3 client's connection pool closed abruptly, which causes
        // subsequent requests to the s3 client to fail.
// FileSystem caches the file system instances.
// The fix is to clear the FileSystem cache so that it can recreate the file system instances.
//
for (Throwable ex : Throwables.getCausalChain(e)) {
if (ex instanceof IllegalStateException && ex.getMessage().contains("Connection pool shut down")) {
log.warn("File system connection pool is down. It will be restarted.");
registry.counter(HiveMetrics.CounterHiveFileSystemFailure.getMetricName()).increment();
try {
FileSystem.closeAll();
} catch (Exception fe) {
log.warn("Failed closing the file system.", fe);
}
Throwables.propagate(ex);
}
}
throw e;
}
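    //
    // Reuse sketch (informational): the other service methods in this class route recoverable runtime failures
    // through this handler, e.g.:
    //
    //   try {
    //       super.update(requestContext, tableInfo);
    //   } catch (IllegalStateException e) {
    //       throw handleException(e); // clears the FileSystem cache if the S3 connection pool was shut down
    //   }
    //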
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
return directSqlTable.exists(name);
}
/**
* getTable.
*
* @param requestContext The request context
* @param name The qualified name of the resource to get
* @return table dto
*/
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final TableInfo info = super.get(requestContext, name);
if (connectorContext.getConfig().isCommonViewEnabled()
&& HiveTableUtil.isCommonView(info)) {
final String tableLoc = HiveTableUtil.getCommonViewMetadataLocation(info);
return hiveConnectorFastTableServiceProxy.getCommonViewTableInfo(name, tableLoc, info,
new HiveTypeConverter(), connectorContext.getConfig().isIcebergCacheEnabled());
}
if (!connectorContext.getConfig().isIcebergEnabled() || !HiveTableUtil.isIcebergTable(info)) {
return info;
}
// Return the iceberg table with just the metadata location included.
if (connectorContext.getConfig().shouldFetchOnlyMetadataLocationEnabled()
&& requestContext.isIncludeMetadataLocationOnly()) {
return info;
}
final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
final TableInfo result = hiveConnectorFastTableServiceProxy.getIcebergTable(name, tableLoc, info,
requestContext.isIncludeMetadata(), connectorContext.getConfig().isIcebergCacheEnabled());
// Renamed tables could still be cached with the old table name.
// Set it to the qName in the request.
result.setName(name);
return result;
} catch (IllegalStateException e) {
throw handleException(e);
}
}
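    //
    // Summary of the branching above (informational):
    //   common view enabled and table is a common view -> load the view metadata through the proxy
    //   iceberg disabled or table is not an iceberg table -> return the plain HMS TableInfo
    //   metadata-location-only request (when enabled) -> return the HMS TableInfo without loading iceberg metadata
    //   otherwise -> load the iceberg metadata through the proxy and reset the name, since the cache may still
    //   hold the table under its old (pre-rename) name
    //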
@Override
public Map<String, List<QualifiedName>> getTableNames(
final ConnectorRequestContext context,
final List<String> uris,
final boolean prefixSearch
) {
return directSqlTable.getTableNames(uris, prefixSearch);
}
/**
* Update a table with the given metadata.
*
     * If the table is an iceberg table, lock it for update so that no other request can update it; if the meta
     * information is invalid, throw an error.
     * If the table is not an iceberg table, do a regular table update.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
try {
if (HiveTableUtil.isIcebergTable(tableInfo)) {
icebergTableHandler.handleUpdate(requestContext, this.directSqlTable, tableInfo);
} else if (connectorContext.getConfig().isCommonViewEnabled()
&& HiveTableUtil.isCommonView(tableInfo)) {
final QualifiedName tableName = tableInfo.getName();
HiveTableUtil.throwIfTableMetadataNullOrEmpty(tableName, tableInfo.getMetadata());
final String tableMetadataLocation = HiveTableUtil.getCommonViewMetadataLocation(tableInfo);
commonViewHandler.handleUpdate(requestContext, this.directSqlTable,
tableInfo, tableMetadataLocation);
} else {
super.update(requestContext, tableInfo);
}
} catch (IllegalStateException e) {
throw handleException(e);
}
}
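    //
    // Minimal usage sketch (hedged; the constants are assumed to be the public params exposed by DirectSqlTable
    // and the builder calls are illustrative): an iceberg commit goes through update() with the previous and new
    // metadata locations set in the table metadata map:
    //
    //   final Map<String, String> metadata = new HashMap<>();
    //   metadata.put(DirectSqlTable.PARAM_TABLE_TYPE, "ICEBERG");
    //   metadata.put(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION, lastReadLocation);
    //   metadata.put(DirectSqlTable.PARAM_METADATA_LOCATION, newLocation);
    //   tableService.update(requestContext, TableInfo.builder().name(tableName).metadata(metadata).build());
    //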
/**
* {@inheritDoc}.
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
directSqlTable.delete(name);
}
}
| 9,569 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlSavePartition.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.PartitionUtil;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.Table;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Transactional;
import java.sql.Types;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to save hive partitions.
*
* @author amajumdar
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlSavePartition {
private static final String PARAM_LAST_DDL_TIME = "transient_lastDdlTime";
private final Registry registry;
private final String catalogName;
private final int batchSize;
private final JdbcTemplate jdbcTemplate;
private final SequenceGeneration sequenceGeneration;
private final HiveConnectorFastServiceMetric fastServiceMetric;
/**
* Constructor.
*
* @param connectorContext connector context
* @param jdbcTemplate JDBC template
* @param sequenceGeneration sequence generator
* @param fastServiceMetric fast service metric
*/
public DirectSqlSavePartition(final ConnectorContext connectorContext, final JdbcTemplate jdbcTemplate,
final SequenceGeneration sequenceGeneration,
final HiveConnectorFastServiceMetric fastServiceMetric) {
this.registry = connectorContext.getRegistry();
this.catalogName = connectorContext.getCatalogName();
this.batchSize = connectorContext.getConfig().getHiveMetastoreBatchSize();
this.jdbcTemplate = jdbcTemplate;
this.sequenceGeneration = sequenceGeneration;
this.fastServiceMetric = fastServiceMetric;
}
/**
* Inserts the partitions.
     * Note: The column descriptor of the partitions will be set to that of the table.
*
* @param tableQName table name
* @param table hive table
* @param partitions list of partitions
*/
public void insert(final QualifiedName tableQName, final Table table, final List<PartitionInfo> partitions) {
final long start = registry.clock().wallTime();
try {
// Get the table id and column id
final TableSequenceIds tableSequenceIds = getTableSequenceIds(table.getDbName(), table.getTableName());
// Get the sequence ids and lock the records in the database
final PartitionSequenceIds partitionSequenceIds =
this.getPartitionSequenceIds(partitions.size());
final List<List<PartitionInfo>> subPartitionList = Lists.partition(partitions, batchSize);
// Use the current time for create and update time.
final long currentTimeInEpoch = Instant.now().getEpochSecond();
int index = 0;
// Insert the partitions in batches
for (List<PartitionInfo> subPartitions : subPartitionList) {
_insert(tableQName, table, tableSequenceIds, partitionSequenceIds, subPartitions, currentTimeInEpoch,
index);
index += batchSize;
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAddPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
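    //
    // Worked example (illustrative): with batchSize = 2 and 5 partitions, the inserts run in batches of 2, 2 and 1.
    // A single block of 5 sequence ids per sequence (PARTITIONS, SDS, SERDES) is reserved up front, and partition i
    // (0-based across all batches) uses partId = reservedPartId + i, sdsId = reservedSdsId + i and
    // serdeId = reservedSerdeId + i.
    //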
private PartitionSequenceIds getPartitionSequenceIds(final int size) {
return new PartitionSequenceIds(sequenceGeneration.newPartitionSequenceIdByName(size,
SequenceGeneration.SEQUENCE_NAME_PARTITION),
sequenceGeneration.newPartitionSequenceIdByName(size,
SequenceGeneration.SEQUENCE_NAME_SDS),
sequenceGeneration.newPartitionSequenceIdByName(size,
SequenceGeneration.SEQUENCE_NAME_SERDES));
}
@SuppressWarnings("checkstyle:methodname")
private void _insert(final QualifiedName tableQName, final Table table, final TableSequenceIds tableSequenceIds,
final PartitionSequenceIds partitionSequenceIds, final List<PartitionInfo> partitions,
final long currentTimeInEpoch, final int index) {
final List<Object[]> serdesValues = Lists.newArrayList();
final List<Object[]> serdeParamsValues = Lists.newArrayList();
final List<Object[]> sdsValues = Lists.newArrayList();
final List<Object[]> partitionsValues = Lists.newArrayList();
final List<Object[]> partitionParamsValues = Lists.newArrayList();
final List<Object[]> partitionKeyValsValues = Lists.newArrayList();
final List<String> partitionNames = Lists.newArrayList();
int currentIndex = index;
for (PartitionInfo partition : partitions) {
final StorageInfo storageInfo = partition.getSerde();
final long partId = partitionSequenceIds.getPartId() + currentIndex;
final long sdsId = partitionSequenceIds.getSdsId() + currentIndex;
final long serdeId = partitionSequenceIds.getSerdeId() + currentIndex;
final String partitionName = partition.getName().getPartitionName();
final List<String> partValues = PartitionUtil.getPartValuesFromPartName(tableQName, table, partitionName);
final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
partitionsValues.add(new Object[]{0, tableSequenceIds.getTableId(), currentTimeInEpoch,
sdsId, escapedPartName, partId, });
for (int i = 0; i < partValues.size(); i++) {
partitionKeyValsValues.add(new Object[]{partId, partValues.get(i), i});
}
// Partition parameters
final Map<String, String> parameters = partition.getMetadata();
if (parameters != null) {
parameters
.forEach((key, value) -> partitionParamsValues.add(new Object[]{value, partId, key}));
}
partitionParamsValues.add(new Object[]{currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME});
if (storageInfo != null) {
serdesValues.add(new Object[]{null, storageInfo.getSerializationLib(), serdeId});
final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
if (serdeInfoParameters != null) {
serdeInfoParameters
.forEach((key, value) -> serdeParamsValues.add(new Object[]{value, serdeId, key}));
}
sdsValues.add(new Object[]{storageInfo.getOutputFormat(), false, tableSequenceIds.getCdId(),
false, serdeId, storageInfo.getUri(), storageInfo.getInputFormat(), 0, sdsId, });
}
partitionNames.add(partitionName);
currentIndex++;
}
try {
jdbcTemplate.batchUpdate(SQL.SERDES_INSERT, serdesValues,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT, serdeParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
jdbcTemplate.batchUpdate(SQL.SDS_INSERT, sdsValues,
new int[]{Types.VARCHAR, Types.BOOLEAN, Types.BIGINT, Types.BOOLEAN,
Types.BIGINT, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.BIGINT, });
jdbcTemplate.batchUpdate(SQL.PARTITIONS_INSERT, partitionsValues,
new int[]{Types.INTEGER, Types.BIGINT, Types.INTEGER, Types.BIGINT, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT, partitionParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
jdbcTemplate.batchUpdate(SQL.PARTITION_KEY_VALS_INSERT, partitionKeyValsValues,
new int[]{Types.BIGINT, Types.VARCHAR, Types.INTEGER});
} catch (DuplicateKeyException e) {
throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
} catch (Exception e) {
throw new ConnectorException(
String.format("Failed inserting partitions %s for table %s", partitionNames, tableQName), e);
}
}
private TableSequenceIds getTableSequenceIds(final String dbName, final String tableName) {
try {
return jdbcTemplate.queryForObject(SQL.TABLE_SELECT,
new SqlParameterValue[]{new SqlParameterValue(Types.VARCHAR, dbName),
new SqlParameterValue(Types.VARCHAR, tableName), },
(rs, rowNum) -> new TableSequenceIds(rs.getLong("tbl_id"), rs.getLong("cd_id")));
} catch (EmptyResultDataAccessException e) {
throw new TableNotFoundException(QualifiedName.ofTable(catalogName, dbName, tableName));
} catch (Exception e) {
throw new ConnectorException(String.format("Failed getting the sequence id for table %s", tableName), e);
}
}
/**
     * Updates the existing partitions. This method assumes that the partitions already exist and so does not
     * validate their existence.
     * Note: The column descriptor of the partitions will not be updated.
*
* @param tableQName table name
* @param partitionHolders list of partitions
*/
public void update(final QualifiedName tableQName, final List<PartitionHolder> partitionHolders) {
final long start = registry.clock().wallTime();
try {
final List<List<PartitionHolder>> subPartitionDetailList = Lists.partition(partitionHolders, batchSize);
final long currentTimeInEpoch = Instant.now().getEpochSecond();
for (List<PartitionHolder> subPartitionHolders : subPartitionDetailList) {
_update(tableQName, subPartitionHolders, currentTimeInEpoch);
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAlterPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
@SuppressWarnings("checkstyle:methodname")
private void _update(final QualifiedName tableQName, final List<PartitionHolder> partitionHolders,
final long currentTimeInEpoch) {
final List<Object[]> serdesValues = Lists.newArrayList();
final List<Object[]> serdeParamsValues = Lists.newArrayList();
final List<Object[]> sdsValues = Lists.newArrayList();
final List<Object[]> partitionParamsValues = Lists.newArrayList();
final List<String> partitionNames = Lists.newArrayList();
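        // Each Object[] accumulated below is ordered to match the parameter order of the corresponding statement in
        // the SQL class; the *_INSERT_UPDATE rows carry the value twice because the trailing value feeds the
        // ON DUPLICATE KEY UPDATE clause.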
for (PartitionHolder partitionHolder : partitionHolders) {
final PartitionInfo partition = partitionHolder.getPartitionInfo();
final StorageInfo storageInfo = partition.getSerde();
final long partId = partitionHolder.getId();
final long sdsId = partitionHolder.getSdId();
final long serdeId = partitionHolder.getSerdeId();
// Partition parameters
final Map<String, String> parameters = partition.getMetadata();
if (parameters != null) {
parameters
.forEach((key, value) -> partitionParamsValues.add(new Object[]{value, partId, key, value}));
}
partitionParamsValues.add(
new Object[]{currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME, currentTimeInEpoch});
if (storageInfo != null) {
serdesValues.add(new Object[]{null, storageInfo.getSerializationLib(), serdeId});
final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
if (serdeInfoParameters != null) {
serdeInfoParameters
.forEach((key, value) -> serdeParamsValues.add(new Object[]{value, serdeId, key, value}));
}
sdsValues.add(new Object[]{storageInfo.getOutputFormat(), false, false, storageInfo.getUri(),
storageInfo.getInputFormat(), sdsId, });
}
partitionNames.add(partition.getName().toString());
}
try {
jdbcTemplate.batchUpdate(SQL.SERDES_UPDATE, serdesValues,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT_UPDATE, serdeParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
jdbcTemplate.batchUpdate(SQL.SDS_UPDATE, sdsValues,
new int[]{Types.VARCHAR, Types.BOOLEAN, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT_UPDATE, partitionParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
} catch (DuplicateKeyException e) {
throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
} catch (Exception e) {
throw new ConnectorException(
String.format("Failed updating partitions %s for table %s", partitionNames, tableQName), e);
}
}
/**
* Delete the partitions with the given <code>partitionNames</code>.
*
* @param tableQName table name
* @param partitionNames list of partition ids
*/
public void delete(final QualifiedName tableQName, final List<String> partitionNames) {
final long start = registry.clock().wallTime();
try {
final List<List<String>> subPartitionNameList = Lists.partition(partitionNames, batchSize);
subPartitionNameList.forEach(subPartitionNames -> _delete(tableQName, subPartitionNames));
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagDropHivePartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
@SuppressWarnings("checkstyle:methodname")
private void _delete(final QualifiedName tableQName, final List<String> partitionNames) {
try {
final List<PartitionSequenceIds> partitionSequenceIds = getPartitionSequenceIds(tableQName, partitionNames);
if (partitionSequenceIds != null && !partitionSequenceIds.isEmpty()) {
_delete(partitionSequenceIds);
}
} catch (EmptyResultDataAccessException ignored) {
log.debug("None of the table {} partitions {} exist for dropping.", tableQName, partitionNames, ignored);
} catch (Exception e) {
throw new ConnectorException(
String.format("Failed dropping table %s partitions: %s", tableQName, partitionNames), e);
}
}
private List<PartitionSequenceIds> getPartitionSequenceIds(final QualifiedName tableName,
final List<String> partitionNames) {
final List<String> paramVariables = partitionNames.stream().map(s -> "?").collect(Collectors.toList());
final String paramVariableString = Joiner.on(",").skipNulls().join(paramVariables);
final SqlParameterValue[] values = new SqlParameterValue[partitionNames.size() + 2];
int index = 0;
values[index++] = new SqlParameterValue(Types.VARCHAR, tableName.getDatabaseName());
values[index++] = new SqlParameterValue(Types.VARCHAR, tableName.getTableName());
for (String partitionName : partitionNames) {
values[index++] = new SqlParameterValue(Types.VARCHAR, partitionName);
}
return jdbcTemplate.query(
String.format(SQL.PARTITIONS_SELECT, paramVariableString), values,
(rs, rowNum) -> new PartitionSequenceIds(rs.getLong("part_id"), rs.getLong("sd_id"),
rs.getLong("serde_id")));
}
/**
* Delete all the partitions for the given table <code>tableQName</code>.
*
* @param tableQName table name
*/
public void delete(final QualifiedName tableQName) {
final long start = registry.clock().wallTime();
try {
List<PartitionSequenceIds> partitionSequenceIds = getPartitionSequenceIds(tableQName);
while (!partitionSequenceIds.isEmpty()) {
_delete(partitionSequenceIds);
partitionSequenceIds = getPartitionSequenceIds(tableQName);
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagDropHivePartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
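    //
    // Note (informational): PARTITIONS_SELECT_ALL is limited to batchSize rows, so the loop above deletes one batch
    // of partition ids at a time and re-queries until no partitions remain for the table.
    //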
private List<PartitionSequenceIds> getPartitionSequenceIds(final QualifiedName tableQName) {
return jdbcTemplate.query(
String.format(SQL.PARTITIONS_SELECT_ALL, this.batchSize),
new Object[]{tableQName.getDatabaseName(), tableQName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR},
(rs, rowNum) -> new PartitionSequenceIds(rs.getLong("part_id"), rs.getLong("sd_id"),
rs.getLong("serde_id")));
}
@SuppressWarnings("checkstyle:methodname")
private void _delete(final List<PartitionSequenceIds> subPartitionIds) {
final List<String> paramVariables = subPartitionIds.stream().map(s -> "?").collect(Collectors.toList());
final SqlParameterValue[] partIds =
subPartitionIds.stream().map(p -> new SqlParameterValue(Types.BIGINT, p.getPartId()))
.toArray(SqlParameterValue[]::new);
final SqlParameterValue[] sdsIds =
subPartitionIds.stream().map(p -> new SqlParameterValue(Types.BIGINT, p.getSdsId()))
.toArray(SqlParameterValue[]::new);
final SqlParameterValue[] serdeIds =
subPartitionIds.stream().filter(p -> p.getSerdeId() != null)
.map(p -> new SqlParameterValue(Types.BIGINT, p.getSerdeId()))
.toArray(SqlParameterValue[]::new);
final String paramVariableString = Joiner.on(",").skipNulls().join(paramVariables);
jdbcTemplate.update(
String.format(SQL.PARTITION_KEY_VALS_DELETES, paramVariableString), (Object[]) partIds);
jdbcTemplate.update(
String.format(SQL.PARTITION_PARAMS_DELETES, paramVariableString), (Object[]) partIds);
jdbcTemplate.update(
String.format(SQL.PARTITIONS_DELETES, paramVariableString), (Object[]) partIds);
jdbcTemplate.update(
String.format(SQL.SERDE_PARAMS_DELETES, paramVariableString), (Object[]) serdeIds);
jdbcTemplate.update(
String.format(SQL.BUCKETING_COLS_DELETES, paramVariableString), (Object[]) sdsIds);
jdbcTemplate.update(
String.format(SQL.SORT_COLS_DELETES, paramVariableString), (Object[]) sdsIds);
jdbcTemplate.update(
String.format(SQL.SDS_DELETES, paramVariableString), (Object[]) sdsIds);
jdbcTemplate.update(
String.format(SQL.SERDES_DELETES, paramVariableString), (Object[]) serdeIds);
}
/**
* Drops, updates and adds partitions for a table.
*
* @param tableQName table name
* @param table table
* @param addedPartitionInfos new partitions to be added
* @param existingPartitionHolders existing partitions to be altered/updated
* @param deletePartitionNames existing partitions to be dropped
*/
public void addUpdateDropPartitions(final QualifiedName tableQName, final Table table,
final List<PartitionInfo> addedPartitionInfos,
final List<PartitionHolder> existingPartitionHolders,
final Set<String> deletePartitionNames) {
final long start = registry.clock().wallTime();
try {
if (!deletePartitionNames.isEmpty()) {
delete(tableQName, Lists.newArrayList(deletePartitionNames));
}
if (!existingPartitionHolders.isEmpty()) {
update(tableQName, existingPartitionHolders);
}
if (!addedPartitionInfos.isEmpty()) {
insert(tableQName, table, addedPartitionInfos);
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAddDropPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
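    //
    // Usage sketch (hedged; variable names and the partition name are hypothetical): a typical save-partitions
    // request is split into the three buckets and delegated here:
    //
    //   directSqlSavePartition.addUpdateDropPartitions(
    //       QualifiedName.ofTable(catalogName, "mydb", "mytable"),
    //       hiveTable,                   // org.apache.hadoop.hive.metastore.api.Table
    //       newPartitionInfos,           // partitions to insert
    //       changedPartitionHolders,     // existing partitions to update in place
    //       Collections.singleton("dateint=20240101"));  // partition names to drop
    //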
@VisibleForTesting
private static class SQL {
static final String SERDES_INSERT =
"INSERT INTO SERDES (NAME,SLIB,SERDE_ID) VALUES (?,?,?)";
static final String SERDES_UPDATE =
"UPDATE SERDES SET NAME=?,SLIB=? WHERE SERDE_ID=?";
static final String SERDES_DELETES =
"DELETE FROM SERDES WHERE SERDE_ID in (%s)";
static final String SERDE_PARAMS_INSERT =
"INSERT INTO SERDE_PARAMS(PARAM_VALUE,SERDE_ID,PARAM_KEY) VALUES (?,?,?)";
static final String SERDE_PARAMS_INSERT_UPDATE =
"INSERT INTO SERDE_PARAMS(PARAM_VALUE,SERDE_ID,PARAM_KEY) VALUES (?,?,?) "
+ "ON DUPLICATE KEY UPDATE PARAM_VALUE=?";
static final String SERDE_PARAMS_DELETES =
"DELETE FROM SERDE_PARAMS WHERE SERDE_ID in (%s)";
static final String SDS_INSERT =
"INSERT INTO SDS (OUTPUT_FORMAT,IS_COMPRESSED,CD_ID,IS_STOREDASSUBDIRECTORIES,SERDE_ID,LOCATION, "
+ "INPUT_FORMAT,NUM_BUCKETS,SD_ID) VALUES (?,?,?,?,?,?,?,?,?)";
static final String SDS_UPDATE =
"UPDATE SDS SET OUTPUT_FORMAT=?,IS_COMPRESSED=?,IS_STOREDASSUBDIRECTORIES=?,LOCATION=?, "
+ "INPUT_FORMAT=? WHERE SD_ID=?";
static final String BUCKETING_COLS_DELETES =
"DELETE FROM BUCKETING_COLS WHERE SD_ID in (%s)";
static final String SORT_COLS_DELETES =
"DELETE FROM SORT_COLS WHERE SD_ID in (%s)";
static final String SDS_DELETES =
"DELETE FROM SDS WHERE SD_ID in (%s)";
static final String PARTITIONS_INSERT =
"INSERT INTO PARTITIONS(LAST_ACCESS_TIME,TBL_ID,CREATE_TIME,SD_ID,PART_NAME,PART_ID) VALUES (?,?,?,?,?,?)";
static final String PARTITIONS_DELETES =
"DELETE FROM PARTITIONS WHERE PART_ID in (%s)";
static final String PARTITION_PARAMS_INSERT =
"INSERT INTO PARTITION_PARAMS (PARAM_VALUE,PART_ID,PARAM_KEY) VALUES (?,?,?)";
static final String PARTITION_PARAMS_INSERT_UPDATE =
"INSERT INTO PARTITION_PARAMS (PARAM_VALUE,PART_ID,PARAM_KEY) VALUES (?,?,?) "
+ "ON DUPLICATE KEY UPDATE PARAM_VALUE=?";
static final String PARTITION_PARAMS_DELETES =
"DELETE FROM PARTITION_PARAMS WHERE PART_ID in (%s)";
static final String PARTITION_KEY_VALS_INSERT =
"INSERT INTO PARTITION_KEY_VALS(PART_ID,PART_KEY_VAL,INTEGER_IDX) VALUES (?,?,?)";
static final String PARTITION_KEY_VALS_DELETES =
"DELETE FROM PARTITION_KEY_VALS WHERE PART_ID in (%s)";
static final String PARTITIONS_SELECT_ALL =
"SELECT P.PART_ID, P.SD_ID, S.SERDE_ID FROM DBS D JOIN TBLS T ON D.DB_ID=T.DB_ID "
+ "JOIN PARTITIONS P ON T.TBL_ID=P.TBL_ID JOIN SDS S ON P.SD_ID=S.SD_ID "
+ "WHERE D.NAME=? and T.TBL_NAME=? limit %d";
static final String PARTITIONS_SELECT =
"SELECT P.PART_ID, P.SD_ID, S.SERDE_ID FROM DBS D JOIN TBLS T ON D.DB_ID=T.DB_ID "
+ "JOIN PARTITIONS P ON T.TBL_ID=P.TBL_ID JOIN SDS S ON P.SD_ID=S.SD_ID "
+ "WHERE D.NAME=? and T.TBL_NAME=? and P.PART_NAME in (%s)";
static final String TABLE_SELECT =
"SELECT T.TBL_ID, S.CD_ID FROM DBS D JOIN TBLS T ON D.DB_ID=T.DB_ID JOIN SDS S ON T.SD_ID=S.SD_ID "
+ "WHERE D.NAME=? and T.TBL_NAME=?";
}
}
| 9,570 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/SequenceGeneration.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
/**
* This class is used to generate the sequence ids.
*
* @author amajumdar
* @since 1.1.x
*/
@Slf4j
@Transactional("hiveTxManager")
public class SequenceGeneration {
/**
* MPartition sequence number.
**/
public static final String SEQUENCE_NAME_PARTITION = "org.apache.hadoop.hive.metastore.model.MPartition";
/**
* MSerDeInfo sequence number.
**/
public static final String SEQUENCE_NAME_SERDES = "org.apache.hadoop.hive.metastore.model.MSerDeInfo";
/**
* MStorageDescriptor sequence number.
**/
public static final String SEQUENCE_NAME_SDS = "org.apache.hadoop.hive.metastore.model.MStorageDescriptor";
private final JdbcTemplate jdbcTemplate;
/**
* Constructor.
*
* @param jdbcTemplate JDBC template
*/
public SequenceGeneration(@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate jdbcTemplate) {
this.jdbcTemplate = jdbcTemplate;
}
/**
     * Returns the current sequence id for the given sequence name and increments it by the given <code>size</code>,
     * reserving a contiguous block of ids for the caller.
     *
     * @param size number of records getting inserted
     * @param sequenceParamName the sequence parameter name
     * @return the current sequence id (the first id of the reserved block)
*/
@Transactional(propagation = Propagation.REQUIRES_NEW)
public Long newPartitionSequenceIdByName(final int size, final String sequenceParamName) {
Long result = null;
try {
//Get current sequence number
result = jdbcTemplate.queryForObject(SQL.SEQUENCE_NEXT_VAL_BYNAME,
new Object[]{sequenceParamName}, Long.class);
} catch (EmptyResultDataAccessException e) {
log.warn("Failed getting the sequence ids for partition", e);
} catch (Exception e) {
            throw new ConnectorException("Failed retrieving the sequence numbers.", e);
}
try {
if (result == null) {
                result = 1L; // init to 1L in case there are no records
jdbcTemplate.update(SQL.SEQUENCE_INSERT_VAL, result + size, sequenceParamName);
} else {
jdbcTemplate.update(SQL.SEQUENCE_UPDATE_VAL, result + size, sequenceParamName);
}
return result;
} catch (Exception e) {
throw new ConnectorException("Failed updating the sequence ids for partition", e);
}
}
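    //
    // Worked example (illustrative): if SEQUENCE_TABLE holds NEXT_VAL = 100 for the MPartition sequence, calling
    // newPartitionSequenceIdByName(10, SEQUENCE_NAME_PARTITION) returns 100 and updates NEXT_VAL to 110, reserving
    // ids 100..109 for the caller. If no row exists yet, the call returns 1 and inserts NEXT_VAL = 11.
    //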
@VisibleForTesting
private static class SQL {
static final String SEQUENCE_INSERT_VAL =
"INSERT INTO SEQUENCE_TABLE(NEXT_VAL,SEQUENCE_NAME) VALUES (?,?)";
static final String SEQUENCE_UPDATE_VAL =
"UPDATE SEQUENCE_TABLE SET NEXT_VAL=? WHERE SEQUENCE_NAME=?";
static final String SEQUENCE_NEXT_VAL_BYNAME =
"SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME=? FOR UPDATE";
}
}
| 9,571 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlDatabase.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.jdbc.support.rowset.SqlRowSet;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nullable;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to update database metadata.
*
* @author amajumdar
* @since 1.3.0
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlDatabase {
private static final String COL_URI = "uri";
private static final String COL_OWNER = "owner";
private static final String COL_PARAM_KEY = "param_key";
private static final String COL_PARAM_VALUE = "param_value";
private final Registry registry;
private final JdbcTemplate jdbcTemplate;
private final HiveConnectorFastServiceMetric fastServiceMetric;
/**
* Constructor.
*
* @param connectorContext server context
* @param jdbcTemplate JDBC template
* @param fastServiceMetric fast service metric
*/
public DirectSqlDatabase(
final ConnectorContext connectorContext,
final JdbcTemplate jdbcTemplate,
final HiveConnectorFastServiceMetric fastServiceMetric
) {
this.registry = connectorContext.getRegistry();
this.jdbcTemplate = jdbcTemplate;
this.fastServiceMetric = fastServiceMetric;
}
/**
* Returns the database internal id.
* @param databaseName database name
* @return database id
*/
private Long getDatabaseId(final QualifiedName databaseName) {
try {
return jdbcTemplate.queryForObject(SQL.GET_DATABASE_ID,
new String[]{databaseName.getDatabaseName()},
new int[]{Types.VARCHAR}, Long.class);
} catch (EmptyResultDataAccessException e) {
log.debug("Database {} not found.", databaseName);
throw new DatabaseNotFoundException(databaseName);
}
}
/**
* Returns the database.
* @param databaseName database name
* @return database
*/
@Transactional(readOnly = true)
public DatabaseInfo getDatabase(final QualifiedName databaseName) {
final Long id = getDatabaseId(databaseName);
return getDatabaseById(id, databaseName);
}
private DatabaseInfo getDatabaseById(final Long id, final QualifiedName databaseName) {
DatabaseInfo result = null;
try {
            // Retrieve the database info record
final SqlRowSet databaseRowSet = jdbcTemplate.queryForRowSet(SQL.GET_DATABASE,
new Object[]{id}, new int[]{Types.BIGINT});
if (databaseRowSet.first()) {
final AuditInfo auditInfo =
AuditInfo.builder().createdBy(databaseRowSet.getString(COL_OWNER)).build();
                // Retrieve the database params
final Map<String, String> metadata = Maps.newHashMap();
try {
final SqlRowSet paramRowSet = jdbcTemplate.queryForRowSet(SQL.GET_DATABASE_PARAMS,
new Object[]{id}, new int[]{Types.BIGINT});
while (paramRowSet.next()) {
metadata.put(paramRowSet.getString(COL_PARAM_KEY),
paramRowSet.getString(COL_PARAM_VALUE));
}
} catch (EmptyResultDataAccessException ignored) { }
result = DatabaseInfo.builder()
.name(databaseName)
.uri(databaseRowSet.getString(COL_URI))
.auditInfo(auditInfo).metadata(metadata).build();
}
} catch (EmptyResultDataAccessException e) {
log.debug("Database {} not found.", databaseName);
throw new DatabaseNotFoundException(databaseName);
}
return result;
}
/**
* Updates the database object.
* @param databaseInfo database object
*/
public void update(final DatabaseInfo databaseInfo) {
log.debug("Start: Database update using direct sql for {}", databaseInfo.getName());
final long start = registry.clock().wallTime();
try {
final Long databaseId = getDatabaseId(databaseInfo.getName());
final DatabaseInfo existingDatabaseInfo = getDatabaseById(databaseId, databaseInfo.getName());
final Map<String, String> newMetadata = databaseInfo.getMetadata() == null ? Maps.newHashMap()
: databaseInfo.getMetadata();
final MapDifference<String, String> diff = Maps.difference(existingDatabaseInfo.getMetadata(), newMetadata);
insertDatabaseParams(databaseId, diff.entriesOnlyOnRight());
final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
updateDatabaseParams(databaseId, updateParams);
final String uri =
Strings.isNullOrEmpty(databaseInfo.getUri()) ? existingDatabaseInfo.getUri() : databaseInfo.getUri();
final String newOwner = getOwner(databaseInfo.getAudit());
            final String owner =
                Strings.isNullOrEmpty(newOwner) ? existingDatabaseInfo.getAudit().getCreatedBy() : newOwner;
jdbcTemplate.update(SQL.UPDATE_DATABASE, new SqlParameterValue(Types.VARCHAR, uri),
new SqlParameterValue(Types.VARCHAR, owner),
new SqlParameterValue(Types.BIGINT, databaseId));
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAlterDatabase.getMetricName(), registry.clock().wallTime() - start);
log.debug("End: Database update using direct sql for {}", databaseInfo.getName());
}
}
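    //
    // Illustrative example of the Maps.difference usage above: with existing params {a=1, b=2} and new params
    // {b=3, c=4}, entriesOnlyOnRight() yields {c=4} (inserted) and entriesDiffering() yields {b=(2, 3)} whose
    // rightValue 3 is written by the update statement; the key 'a', present only in the existing map, is left
    // untouched.
    //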
private String getOwner(@Nullable final AuditInfo audit) {
return audit != null ? audit.getCreatedBy() : null;
}
private void insertDatabaseParams(final Long id, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.map(s -> new Object[]{id, s.getKey(), s.getValue()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.INSERT_DATABASE_PARAMS, paramsList,
new int[]{Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
}
}
private void updateDatabaseParams(final Long id, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.map(s -> new Object[]{s.getValue(), id, s.getKey()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.UPDATE_DATABASE_PARAMS, paramsList,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
}
}
@VisibleForTesting
private static class SQL {
static final String GET_DATABASE_ID =
"select d.db_id from DBS d where d.name=?";
static final String GET_DATABASE =
"select d.desc, d.name, d.db_location_uri uri, d.owner_name owner from DBS d where d.db_id=?";
static final String GET_DATABASE_PARAMS =
"select param_key, param_value from DATABASE_PARAMS where db_id=?";
static final String UPDATE_DATABASE_PARAMS =
"update DATABASE_PARAMS set param_value=? WHERE db_id=? and param_key=?";
static final String INSERT_DATABASE_PARAMS =
"insert into DATABASE_PARAMS(db_id,param_key,param_value) values (?,?,?)";
static final String UPDATE_DATABASE =
"UPDATE DBS SET db_location_uri=?, owner_name=? WHERE db_id=?";
}
}
| 9,572 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/PartitionHolder.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import lombok.Data;
import org.apache.hadoop.hive.metastore.api.Partition;
/**
* A wrapper class to hold the Partition internal ids and the partition either as PartitionInfo or Partition.
* @author amajumdar
* @since 1.1.x
*/
@Data
public class PartitionHolder {
// id of the PARTITIONS table
private Long id;
// id of the SDS table
private Long sdId;
// id of the SERDES table
private Long serdeId;
private PartitionInfo partitionInfo;
private Partition partition;
/**
* Constructor populating the ids and partitionInfo.
* @param id partition id
* @param sdId partition storage id
* @param serdeId partition serde id
* @param partitionInfo partition info
*/
public PartitionHolder(final Long id, final Long sdId, final Long serdeId, final PartitionInfo partitionInfo) {
this.id = id;
this.sdId = sdId;
this.serdeId = serdeId;
this.partitionInfo = partitionInfo;
}
/**
* Constructor populating the partition only.
* @param partition partition
*/
public PartitionHolder(final Partition partition) {
this.partition = partition;
}
}
| 9,573 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlGetPartition.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.HiveFilterPartition;
import com.netflix.metacat.connector.hive.util.PartitionFilterGenerator;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.StringReader;
import java.sql.Types;
import java.time.Instant;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to get partitions.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlGetPartition {
/**
     * The dateCreated field that users can request to sort on.
*/
public static final String FIELD_DATE_CREATED = "dateCreated";
private static final String FIELD_BATCHID = "batchid";
private static final String AUDIT_DB = "audit";
private static final Pattern AUDIT_TABLENAME_PATTERN = Pattern.compile(
"(?<db>.*)__(?<table>.*)__audit(.*)$"
);
private static final String PARTITION_NAME = "name";
private static final String PARTITION_URI = "uri";
private final ThreadServiceManager threadServiceManager;
private final Registry registry;
private JdbcTemplate jdbcTemplate;
private final HiveConnectorFastServiceMetric fastServiceMetric;
private final String catalogName;
private final Config config;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param connectorContext server context
* @param threadServiceManager thread service manager
* @param jdbcTemplate JDBC template
* @param fastServiceMetric fast service metric
*/
public DirectSqlGetPartition(
final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager,
@Qualifier("hiveReadJdbcTemplate") final JdbcTemplate jdbcTemplate,
final HiveConnectorFastServiceMetric fastServiceMetric
) {
this.catalogName = connectorContext.getCatalogName();
this.threadServiceManager = threadServiceManager;
this.registry = connectorContext.getRegistry();
this.config = connectorContext.getConfig();
this.jdbcTemplate = jdbcTemplate;
this.fastServiceMetric = fastServiceMetric;
configuration = connectorContext.getConfiguration();
}
/**
* Number of partitions for the given table.
*
* @param requestContext request context
* @param tableName tableName
* @return Number of partitions
*/
@Transactional(readOnly = true)
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName
) {
final long start = registry.clock().wallTime();
// Handler for reading the result set
final ResultSetExtractor<Integer> handler = rs -> {
int count = 0;
while (rs.next()) {
count = rs.getInt("count");
}
return count;
};
try {
final Optional<QualifiedName> sourceTable
= getSourceTableName(tableName.getDatabaseName(), tableName.getTableName(),
false);
return sourceTable.map(
qualifiedName ->
jdbcTemplate.query(SQL.SQL_GET_AUDIT_TABLE_PARTITION_COUNT,
new String[]{
tableName.getDatabaseName(),
tableName.getTableName(),
qualifiedName.getDatabaseName(),
qualifiedName.getTableName(), },
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR}, handler))
.orElseGet(
() -> jdbcTemplate.query(SQL.SQL_GET_PARTITION_COUNT,
new String[]{
tableName.getDatabaseName(),
tableName.getTableName(), },
new int[]{Types.VARCHAR, Types.VARCHAR}, handler));
} catch (Exception e) {
throw new ConnectorException("Failed getting the partition count", e);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionCount.getMetricName(), registry.clock().wallTime() - start);
}
}
/**
* Gets the Partitions based on a filter expression for the specified table.
*
* @param requestContext The Metacat request context
* @param tableName table name
* @param partitionsRequest The metadata for what kind of partitions to get from the table
* @return filtered list of partitions
*/
@Transactional(readOnly = true)
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest
) {
final long start = registry.clock().wallTime();
try {
return this.getPartitions(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionsRequest.getPartitionNames(),
partitionsRequest.getFilter(),
partitionsRequest.getSort(),
partitionsRequest.getPageable(),
partitionsRequest.getIncludePartitionDetails(),
partitionsRequest.getIncludeAuditOnly()
).stream().map(PartitionHolder::getPartitionInfo).collect(Collectors.toList());
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
/**
* Gets the partition uris based on a filter expression for the specified table.
*
* @param requestContext The Metacat request context
* @param tableName table handle to get partition for
* @param partitionsRequest The metadata for what kind of partitions to get from the table
* @return filtered list of partition names
*/
@Transactional(readOnly = true)
public List<String> getPartitionUris(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest) {
final long start = registry.clock().wallTime();
final List<String> result;
final List<String> partitionNames = partitionsRequest.getPartitionNames();
final Sort sort = partitionsRequest.getSort();
final Pageable pageable = partitionsRequest.getPageable();
final String filterExpression = partitionsRequest.getFilter();
if (filterExpression != null) {
return filterPartitionsColumn(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionNames,
PARTITION_URI,
filterExpression,
sort,
pageable,
partitionsRequest.getIncludeAuditOnly());
} else {
final ResultSetExtractor<List<String>> handler = rs -> {
final List<String> uris = Lists.newArrayList();
while (rs.next()) {
uris.add(rs.getString(PARTITION_URI));
}
return uris;
};
result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
null, partitionNames, SQL.SQL_GET_PARTITIONS_URI, handler, sort, pageable,
partitionsRequest.getIncludeAuditOnly());
}
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
return result;
}
/**
     * Queries partitions using a filter expression applied to the name or uri column.
*/
private List<String> filterPartitionsColumn(
final String databaseName,
final String tableName,
final List<String> partitionNames,
final String columnName,
final String filterExpression,
final Sort sort,
final Pageable pageable,
final boolean forceDisableAudit) {
final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition()
: new FilterPartition();
        // true if the filter expression references the batchid field
final boolean isBatched =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
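        // The filter expression is evaluated in memory against each returned partition's name/uri
        // (and dateCreated when referenced), rather than being pushed down into the SQL query.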
ResultSetExtractor<List<String>> handler = rs -> {
final List<String> columns = Lists.newArrayList();
while (rs.next()) {
final String name = rs.getString(PARTITION_NAME);
final String uri = rs.getString(PARTITION_URI);
final long createdDate = rs.getLong(FIELD_DATE_CREATED);
Map<String, String> values = null;
if (hasDateCreated) {
values = Maps.newHashMap();
values.put(FIELD_DATE_CREATED, createdDate + "");
}
if (Strings.isNullOrEmpty(filterExpression)
|| filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
columns.add(rs.getString(columnName));
}
}
return columns;
};
return getHandlerResults(databaseName,
tableName, filterExpression, partitionNames,
SQL.SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable, forceDisableAudit);
}
/**
* Gets the partition names/keys based on a filter expression for the specified table.
*
* @param requestContext The Metacat request context
* @param tableName table handle to get partition for
* @param partitionsRequest The metadata for what kind of partitions to get from the table
* @return filtered list of partition names
*/
@Transactional(readOnly = true)
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest) {
final long start = registry.clock().wallTime();
final List<String> result;
final List<String> partitionNames = partitionsRequest.getPartitionNames();
final Sort sort = partitionsRequest.getSort();
final Pageable pageable = partitionsRequest.getPageable();
final String filterExpression = partitionsRequest.getFilter();
if (filterExpression != null) {
return filterPartitionsColumn(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionNames,
PARTITION_NAME,
filterExpression,
sort,
pageable,
partitionsRequest.getIncludeAuditOnly());
} else {
final ResultSetExtractor<List<String>> handler = rs -> {
final List<String> names = Lists.newArrayList();
while (rs.next()) {
names.add(rs.getString("name"));
}
return names;
};
result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
null, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY,
handler, sort, pageable, partitionsRequest.getIncludeAuditOnly());
}
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
return result;
}
/**
* getPartitionNames.
*
* @param context request context
* @param uris uris
* @param prefixSearch prefixSearch
* @return partition names
*/
@Transactional(readOnly = true)
public Map<String, List<QualifiedName>> getPartitionNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
final long start = registry.clock().wallTime();
final Map<String, List<QualifiedName>> result = Maps.newHashMap();
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(SQL.SQL_GET_PARTITION_NAMES_BY_URI);
final List<SqlParameterValue> params = Lists.newArrayList();
if (prefixSearch) {
queryBuilder.append(" 1=2");
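            // "1=2" is a no-op seed predicate so that each uri can be appended uniformly as an OR clause.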
uris.forEach(uri -> {
queryBuilder.append(" or location like ?");
params.add(new SqlParameterValue(Types.VARCHAR, uri + "%"));
});
} else {
queryBuilder.append(" location in (");
Joiner.on(',').appendTo(queryBuilder, uris.stream().map(uri -> "?").collect(Collectors.toList()));
queryBuilder.append(")");
params.addAll(uris.stream()
.map(uri -> new SqlParameterValue(Types.VARCHAR, uri)).collect(Collectors.toList()));
}
final ResultSetExtractor<Map<String, List<QualifiedName>>> handler = rs -> {
while (rs.next()) {
final String schemaName = rs.getString("schema_name");
final String tableName = rs.getString("table_name");
final String partitionName = rs.getString("partition_name");
final String uri = rs.getString("location");
final List<QualifiedName> partitionNames = result.get(uri);
final QualifiedName qualifiedName =
QualifiedName.ofPartition(catalogName, schemaName, tableName, partitionName);
if (partitionNames == null) {
result.put(uri, Lists.newArrayList(qualifiedName));
} else {
partitionNames.add(qualifiedName);
}
}
return result;
};
try {
jdbcTemplate.query(queryBuilder.toString(), params.toArray(), handler);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionNames.getMetricName(), registry.clock().wallTime() - start);
}
return result;
}
@Transactional(readOnly = true)
protected Map<String, PartitionHolder> getPartitionHoldersByNames(final Table table,
final List<String> partitionNames,
final boolean forceDisableAudit) {
        // This is an internal call to get partitions; callers should always set forceDisableAudit to true.
return this.getPartitions(
table.getDbName(),
table.getTableName(),
partitionNames,
null,
null,
null,
false,
forceDisableAudit
).stream().collect(Collectors.toMap(
p -> p.getPartitionInfo().getName().getPartitionName(),
p -> p)
);
}
private List<PartitionHolder> getPartitions(
final String databaseName,
final String tableName,
@Nullable final List<String> partitionIds,
@Nullable final String filterExpression,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean includePartitionDetails,
final boolean forceDisableAudit
) {
final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition()
: new FilterPartition();
        // true if the filter expression references the batchid field
final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
// Handler for reading the result set
final ResultSetExtractor<List<PartitionHolder>> handler = rs -> {
final List<PartitionHolder> result = Lists.newArrayList();
final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
int noOfRows = 0;
while (rs.next()) {
noOfRows++;
final String name = rs.getString("name");
final String uri = rs.getString("uri");
final long createdDate = rs.getLong(FIELD_DATE_CREATED);
Map<String, String> values = null;
if (hasDateCreated) {
values = Maps.newHashMap();
values.put(FIELD_DATE_CREATED, createdDate + "");
}
if (Strings.isNullOrEmpty(filterExpression)
|| filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
final Long id = rs.getLong("id");
final Long sdId = rs.getLong("sd_id");
final Long serdeId = rs.getLong("serde_id");
final String inputFormat = rs.getString("input_format");
final String outputFormat = rs.getString("output_format");
final String serializationLib = rs.getString("slib");
final StorageInfo storageInfo = new StorageInfo();
storageInfo.setUri(uri);
storageInfo.setInputFormat(inputFormat);
storageInfo.setOutputFormat(outputFormat);
storageInfo.setSerializationLib(serializationLib);
final AuditInfo auditInfo = new AuditInfo();
auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
result.add(new PartitionHolder(id, sdId, serdeId,
PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName,
databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
}
// Fail if the number of partitions exceeds the threshold limit.
if (result.size() > config.getMaxPartitionsThreshold()) {
registry.counter(registry.createId(HiveMetrics.CounterHiveGetPartitionsExceedThresholdFailure
.getMetricName()).withTags(tableQName.parts())).increment();
final String message =
String.format("Number of partitions queried for table %s exceeded the threshold %d",
tableQName, config.getMaxPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
}
}
registry.gauge(registry.createId(HiveMetrics.GaugePreExpressionFilterGetPartitionsCount
.getMetricName()).withTags(tableQName.parts())).set(noOfRows);
return result;
};
final List<PartitionHolder> partitions = this.getHandlerResults(
databaseName,
tableName,
filterExpression,
partitionIds,
SQL.SQL_GET_PARTITIONS,
handler,
sort,
pageable,
forceDisableAudit
);
if (includePartitionDetails && !partitions.isEmpty()) {
final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
for (PartitionHolder partitionHolder : partitions) {
partIds.add(partitionHolder.getId());
sdIds.add(partitionHolder.getSdId());
serdeIds.add(partitionHolder.getSerdeId());
}
final List<ListenableFuture<Void>> futures = Lists.newArrayList();
final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
futures.add(threadServiceManager.getExecutor().submit(() ->
populateParameters(partIds, SQL.SQL_GET_PARTITION_PARAMS,
"part_id", partitionParams)));
final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
if (!sdIds.isEmpty()) {
futures.add(threadServiceManager.getExecutor().submit(() ->
populateParameters(sdIds, SQL.SQL_GET_SD_PARAMS,
"sd_id", sdParams)));
}
final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
if (!serdeIds.isEmpty()) {
futures.add(threadServiceManager.getExecutor().submit(() ->
populateParameters(serdeIds, SQL.SQL_GET_SERDE_PARAMS,
"serde_id", serdeParams)));
}
ListenableFuture<List<Void>> future = null;
try {
future = Futures.allAsList(futures);
final int getPartitionsDetailsTimeout = Integer.parseInt(configuration
.getOrDefault(HiveConfigConstants.GET_PARTITION_DETAILS_TIMEOUT, "120"));
future.get(getPartitionsDetailsTimeout, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
try {
if (future != null) {
future.cancel(true);
}
} catch (Exception ignored) {
log.warn("Failed cancelling the task that gets the partition details.");
}
Throwables.propagate(e);
}
for (PartitionHolder partitionHolder : partitions) {
partitionHolder.getPartitionInfo().setMetadata(partitionParams.get(partitionHolder.getId()));
partitionHolder.getPartitionInfo().getSerde()
.setParameters(sdParams.get(partitionHolder.getSdId()));
partitionHolder.getPartitionInfo().getSerde()
.setSerdeInfoParameters(serdeParams.get(partitionHolder.getSerdeId()));
}
}
return partitions;
}
private <T> List<T> getHandlerResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor<List<T>> resultSetExtractor,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
List<T> partitions;
final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
try {
if (!Strings.isNullOrEmpty(filterExpression)) {
final PartitionFilterGenerator generator =
new PartitionFilterGenerator(getPartitionKeys(databaseName, tableName, forceDisableAudit),
config.escapePartitionNameOnFilter());
String filterSql = (String) new PartitionParser(new StringReader(filterExpression)).filter()
.jjtAccept(generator, null);
if (generator.isOptimized()) {
filterSql = generator.getOptimizedSql();
}
if (filterSql != null && !filterSql.isEmpty()) {
filterSql = " and (" + filterSql + ")";
}
partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
sql, resultSetExtractor,
generator.joinSql(), filterSql,
generator.getParams(), sort, pageable, forceDisableAudit);
} else {
partitions = getHandlerResults(databaseName, tableName, null, partitionIds,
sql, resultSetExtractor,
null, null,
null, sort, pageable, forceDisableAudit);
}
} catch (Exception e) {
            log.warn("Experiment: Get partitions for table {} filter {}"
+ " failed with error {}", tableQName.toString(), filterExpression,
e.getMessage());
registry.counter(registry
.createId(HiveMetrics.CounterHiveExperimentGetTablePartitionsFailure.getMetricName())
.withTags(tableQName.parts())).increment();
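            // Fall back to wildcard (LIKE) based partition name filtering when the filter expression
            // cannot be converted into a parameterized SQL predicate.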
partitions = getHandlerResults(databaseName, tableName,
filterExpression, partitionIds, sql, resultSetExtractor, null,
prepareFilterSql(filterExpression), Lists.newArrayList(), sort, pageable, forceDisableAudit);
}
return partitions;
}
private List<FieldSchema> getPartitionKeys(final String databaseName,
final String tableName,
final boolean forceDisableAudit) {
final List<FieldSchema> result = Lists.newArrayList();
final ResultSetExtractor<List<FieldSchema>> handler = rs -> {
while (rs.next()) {
final String name = rs.getString("pkey_name");
final String type = rs.getString("pkey_type");
result.add(new FieldSchema(name, type, null));
}
return result;
};
final Optional<QualifiedName> sourceTable = getSourceTableName(databaseName, tableName, forceDisableAudit);
return sourceTable.map(qualifiedName -> jdbcTemplate
.query(SQL.SQL_GET_AUDIT_TABLE_PARTITION_KEYS,
new Object[]{databaseName, tableName, qualifiedName.getDatabaseName(), qualifiedName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR},
handler))
.orElseGet(() -> jdbcTemplate
.query(SQL.SQL_GET_PARTITION_KEYS,
new Object[]{databaseName, tableName},
new int[]{Types.VARCHAR, Types.VARCHAR},
handler));
}
private String getDateCreatedSqlCriteria(final String filterExpression) {
final StringBuilder result = new StringBuilder();
Collection<String> values = Lists.newArrayList();
if (!Strings.isNullOrEmpty(filterExpression)) {
try {
values = (Collection<String>) new PartitionParser(
new StringReader(filterExpression)).filter().jjtAccept(new PartitionParamParserEval(),
null
);
} catch (Throwable ignored) {
//
}
}
for (String value : values) {
if (result.length() != 0) {
result.append(" and ");
}
result.append(value.replace("dateCreated", "p.CREATE_TIME"));
}
return result.toString();
}
private Void populateParameters(final List<Long> ids,
final String sql,
final String idName,
final Map<Long, Map<String, String>> params) {
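        // Batch the id lookups in chunks of 5000 to keep the generated IN clause within the query size limit.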
if (ids.size() > 5000) {
final List<List<Long>> subFilterPartitionNamesList = Lists.partition(ids, 5000);
subFilterPartitionNamesList.forEach(subPartitions ->
params.putAll(this.getParameters(subPartitions, sql, idName)));
} else {
params.putAll(this.getParameters(ids, sql, idName));
}
return null;
}
private Map<Long, Map<String, String>> getParameters(final List<Long> ids, final String sql, final String idName) {
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(sql);
if (!ids.isEmpty()) {
queryBuilder.append(" and ").append(idName)
.append(" in ('").append(Joiner.on("','").skipNulls().join(ids)).append("')");
}
final ResultSetExtractor<Map<Long, Map<String, String>>> handler = rs -> {
final Map<Long, Map<String, String>> result = Maps.newHashMap();
while (rs.next()) {
final Long id = rs.getLong(idName);
final String key = rs.getString("param_key");
final String value = rs.getString("param_value");
final Map<String, String> parameters = result.computeIfAbsent(id, k -> Maps.newHashMap());
parameters.put(key, value);
}
return result;
};
return jdbcTemplate.query(queryBuilder.toString(), handler);
}
private Collection<String> getSinglePartitionExprs(@Nullable final String filterExpression) {
Collection<String> result = Lists.newArrayList();
if (!Strings.isNullOrEmpty(filterExpression)) {
try {
result = (Collection<String>) new PartitionParser(
new StringReader(filterExpression)).filter().jjtAccept(config.escapePartitionNameOnFilter()
? new HivePartitionKeyParserEval() : new PartitionKeyParserEval(),
null
);
} catch (Throwable ignored) {
//
}
}
if (result != null) {
result = result.stream().filter(s -> !(s.startsWith("batchid=") || s.startsWith("dateCreated="))).collect(
Collectors.toList());
}
return result;
}
private String prepareFilterSql(@Nullable final String filterExpression) {
final StringBuilder result = new StringBuilder();
// Support for dateCreated
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
String dateCreatedSqlCriteria = null;
if (hasDateCreated) {
dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
}
final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
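        // Each single-partition expression (e.g. key=value) extracted from the filter becomes a LIKE
        // wildcard match on the partition name.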
for (String singlePartitionExpr : singlePartitionExprs) {
result.append(" and p.PART_NAME like '%").append(singlePartitionExpr).append("%'");
}
if (!Strings.isNullOrEmpty(dateCreatedSqlCriteria)) {
result.append(" and ").append(dateCreatedSqlCriteria);
}
return result.toString();
}
private <T> List<T> getHandlerResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
//
        // Limiting the in clause to 5000 part names because the sql query with the IN clause for part_name (767 bytes)
        // will hit the max sql query length (max_allowed_packet for our RDS) if we use more than 5400 or so
//
List<T> partitions = Lists.newArrayList();
if (partitionIds != null && partitionIds.size() > 5000) {
final List<List<String>> subFilterPartitionNamesList = Lists.partition(partitionIds, 5000);
final List<T> finalPartitions = partitions;
subFilterPartitionNamesList.forEach(
subPartitionIds -> finalPartitions.addAll(
this.getSubHandlerResultsFromQuery(
databaseName,
tableName,
filterExpression,
subPartitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable,
forceDisableAudit
)
)
);
} else {
partitions = this.getSubHandlerResultsFromQuery(
databaseName,
tableName,
filterExpression,
partitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable,
forceDisableAudit
);
}
return partitions;
}
private <T> List<T> getSubHandlerResultsFromQuery(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
if (getSourceTableName(databaseName, tableName, forceDisableAudit).isPresent()) {
return this.getSubHandlerAuditTableResults(
databaseName,
tableName,
filterExpression,
partitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable,
forceDisableAudit
);
} else {
return this.getSubHandlerResults(
databaseName,
tableName,
filterExpression,
partitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable
);
}
}
private <T> List<T> getSubHandlerResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
// Create the sql
final StringBuilder queryBuilder = getBasicPartitionQuery(partitionIds, sql, joinSql, filterSql);
addSortPageableFilter(queryBuilder, filterExpression, sort, pageable);
List<T> partitions;
final ImmutableList.Builder<Object> paramsBuilder = ImmutableList.builder().add(databaseName, tableName);
if (partitionIds != null && !partitionIds.isEmpty()) {
paramsBuilder.addAll(partitionIds);
}
if (filterSql != null && filterParams != null) {
paramsBuilder.addAll(filterParams);
}
final List<Object> params = paramsBuilder.build();
final Object[] oParams = new Object[params.size()];
partitions = (List) jdbcTemplate.query(
queryBuilder.toString(), params.toArray(oParams), resultSetExtractor);
if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
partitions = processPageable(partitions, pageable);
}
return partitions;
}
/**
     * Returns the source table name if the given table is an audit table, i.e. the database is "audit" and the
     * table name matches the WAP audit table name pattern; otherwise returns an empty Optional.
     *
     * @param databaseName database
     * @param tableName table name
     * @param forceDisableAudit force disable audit table processing
     * @return the qualified name of the source table if this is an audit table, otherwise Optional.empty()
*/
private Optional<QualifiedName> getSourceTableName(final String databaseName,
final String tableName,
final boolean forceDisableAudit) {
Optional<QualifiedName> sourceTable = Optional.empty();
final boolean isAuditProcessingEnabled = Boolean.valueOf(configuration
.getOrDefault(HiveConfigConstants.ENABLE_AUDIT_PROCESSING, "true"));
if (!forceDisableAudit && isAuditProcessingEnabled && databaseName.equals(AUDIT_DB)) {
final Matcher matcher = AUDIT_TABLENAME_PATTERN.matcher(tableName);
if (matcher.matches()) {
final String sourceDatabaseName = matcher.group("db");
final String sourceTableName = matcher.group("table");
sourceTable = Optional.of(QualifiedName.ofTable(this.catalogName, sourceDatabaseName, sourceTableName));
}
}
return sourceTable;
}
/**
* Process audit table partition related query.
*
* @param databaseName database name
* @param tableName table name
* @param filterExpression filter
* @param partitionIds partition ids
* @param sql query sql
* @param resultSetExtractor result extractor
* @param joinSql join sql
* @param filterSql filter sql
* @param filterParams filter parameters
* @param sort sort object
     * @param pageable pageable object
     * @param forceDisableAudit force disable audit table processing
* @param <T> query object
* @return query results
*/
private <T> List<T> getSubHandlerAuditTableResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
final Optional<QualifiedName> sourceTableName = getSourceTableName(databaseName, tableName, forceDisableAudit);
List<T> partitions = Lists.newArrayList();
if (sourceTableName.isPresent()) {
final StringBuilder auditTableQueryBuilder = getBasicPartitionQuery(partitionIds, sql, joinSql, filterSql);
final StringBuilder sourceTableQueryBuilder = getBasicPartitionQuery(partitionIds, sql, joinSql, filterSql)
                .append(SQL.SQL_NOT_IN_AUDIT_TABLE_PARTITIONS);
            // Union the two queries; UNION ALL is used for efficiency since the source table query above
            // already filters out the overlapping partitions.
auditTableQueryBuilder.append(" UNION ALL ").append(sourceTableQueryBuilder);
addSortPageableFilter(auditTableQueryBuilder, filterExpression, sort, pageable);
// Params
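            // The bind parameters are supplied twice, once for each branch of the UNION ALL, plus the
            // audit table's database/table names needed by the NOT IN subquery of the second branch.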
final ImmutableList.Builder<Object> paramsBuilder = ImmutableList.builder().add(databaseName, tableName);
if (partitionIds != null && !partitionIds.isEmpty()) {
paramsBuilder.addAll(partitionIds);
}
if (filterSql != null && filterParams != null) {
paramsBuilder.addAll(filterParams);
}
paramsBuilder.add(sourceTableName.get().getDatabaseName(), sourceTableName.get().getTableName());
if (partitionIds != null && !partitionIds.isEmpty()) {
paramsBuilder.addAll(partitionIds);
}
if (filterSql != null && filterParams != null) {
paramsBuilder.addAll(filterParams);
}
paramsBuilder.add(databaseName, tableName);
final List<Object> params = paramsBuilder.build();
final Object[] oParams = new Object[params.size()];
partitions = (List) jdbcTemplate.query(
auditTableQueryBuilder.toString(), params.toArray(oParams), resultSetExtractor);
if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
partitions = processPageable(partitions, pageable);
}
}
return partitions;
}
private StringBuilder getBasicPartitionQuery(
@Nullable final List<String> partitionIds,
final String sql,
@Nullable final String joinSql,
@Nullable final String filterSql
) {
final StringBuilder tableQueryBuilder = new StringBuilder(sql);
if (joinSql != null) {
tableQueryBuilder.append(joinSql);
}
tableQueryBuilder.append(" where d.NAME = ? and t.TBL_NAME = ?");
if (filterSql != null) {
tableQueryBuilder.append(filterSql);
}
if (partitionIds != null && !partitionIds.isEmpty()) {
final List<String> paramVariables = partitionIds.stream().map(s -> "?").collect(Collectors.toList());
tableQueryBuilder.append(" and p.PART_NAME in (")
.append(Joiner.on(",").skipNulls().join(paramVariables)).append(")");
}
return tableQueryBuilder;
}
    // Adds the sort and limit clauses to the sql query.
private void addSortPageableFilter(
final StringBuilder queryBuilder,
@Nullable final String filterExpression,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
if (sort != null && sort.hasSort()) {
queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
}
if (pageable != null && pageable.isPageable() && Strings.isNullOrEmpty(filterExpression)) {
if (sort == null || !sort.hasSort()) {
queryBuilder.append(" order by id");
                // The sort column must be id, which both audit table and regular table pagination rely on.
}
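            // MySQL-style "LIMIT offset,count" syntax; pagination is pushed into SQL only when no filter expression is present.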
queryBuilder.append(" limit ").append(pageable.getOffset()).append(',').append(pageable.getLimit());
}
}
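    // Applies offset/limit pagination in memory; used when a filter expression prevents pushing pagination into SQL.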
private <T> List<T> processPageable(final List<T> partitions,
final Pageable pageable) {
int limit = pageable.getOffset() + pageable.getLimit();
if (partitions.size() < limit) {
limit = partitions.size();
}
if (pageable.getOffset() > limit) {
return Lists.newArrayList();
} else {
return partitions.subList(pageable.getOffset(), limit);
}
}
@VisibleForTesting
private static class SQL {
static final String SQL_GET_PARTITIONS_WITH_KEY_URI =
//Add p.part_id as id to allow pagination using 'order by id'
"select p.part_id as id, p.PART_NAME as name, p.CREATE_TIME as dateCreated, sds.location uri"
+ " from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID "
+ "join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID";
static final String SQL_GET_PARTITIONS_URI =
"select p.part_id as id, sds.location uri"
+ " from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID "
+ "join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID";
static final String SQL_GET_PARTITIONS_WITH_KEY =
"select p.part_id as id, p.PART_NAME as name from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d on t.DB_ID = d.DB_ID";
static final String SQL_GET_PARTITIONS =
"select p.part_id as id, p.PART_NAME as name, p.CREATE_TIME as dateCreated,"
+ " sds.location uri, sds.input_format, sds.output_format,"
+ " sds.sd_id, s.serde_id, s.slib from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d"
+ " on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID"
+ " join SERDES s on sds.SERDE_ID=s.SERDE_ID";
static final String SQL_GET_PARTITION_NAMES_BY_URI =
"select p.part_name partition_name,t.tbl_name table_name,d.name schema_name,"
+ " sds.location from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID"
+ " join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID where";
static final String SQL_GET_PARTITION_PARAMS =
"select part_id, param_key, param_value from PARTITION_PARAMS where 1=1";
static final String SQL_GET_SD_PARAMS =
"select sd_id, param_key, param_value from SD_PARAMS where 1=1";
static final String SQL_GET_SERDE_PARAMS =
"select serde_id, param_key, param_value from SERDE_PARAMS where 1=1";
static final String SQL_GET_PARTITION_KEYS =
"select pkey_name, pkey_type from PARTITION_KEYS as p "
+ "join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d"
+ " on t.DB_ID = d.DB_ID where d.name=? and t.tbl_name=? order by integer_idx";
static final String SQL_GET_PARTITION_COUNT =
"select count(*) count from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d on t.DB_ID = d.DB_ID"
+ " where d.NAME = ? and t.TBL_NAME = ?";
        // Audit table partitions take precedence in case there are partitions that overlap with the source table.
static final String SQL_GET_AUDIT_TABLE_PARTITION_COUNT =
"select count(distinct p1.part_name) count from PARTITIONS as p1 "
+ "join TBLS as t1 on t1.TBL_ID = p1.TBL_ID join DBS as d1 on t1.DB_ID = d1.DB_ID "
+ "where ( d1.NAME = ? and t1.TBL_NAME = ? ) "
+ "or ( d1.NAME = ? and t1.TBL_NAME = ?)";
        // using nested ordering, see https://stackoverflow.com/questions/6965333/mysql-union-distinct
static final String SQL_GET_AUDIT_TABLE_PARTITION_KEYS =
"select pkey_name, pkey_type from ("
+ "(select pkey_name, pkey_type, integer_idx from PARTITION_KEYS as p1 "
+ "join TBLS as t1 on t1.TBL_ID = p1.TBL_ID join DBS as d1 "
+ "on t1.DB_ID = d1.DB_ID where d1.NAME = ? and t1.TBL_NAME = ? "
+ ") UNION "
+ "(select pkey_name, pkey_type, integer_idx from PARTITION_KEYS as p2 "
+ "join TBLS as t2 on t2.TBL_ID = p2.TBL_ID join DBS as d2 "
+ "on t2.DB_ID = d2.DB_ID where d2.NAME = ? and t2.TBL_NAME = ?)) as pp order by integer_idx";
//select the partitions not in audit table
        static final String SQL_NOT_IN_AUDIT_TABLE_PARTITIONS =
" and p.PART_NAME not in ("
+ " select p1.PART_NAME from PARTITIONS as p1"
+ " join TBLS as t1 on t1.TBL_ID = p1.TBL_ID join DBS as d1"
+ " on t1.DB_ID = d1.DB_ID where d1.NAME = ? and t1.TBL_NAME = ? )"; //audit table
}
}
| 9,574 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/PartitionSequenceIds.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
/**
* Class representing the ids for a partition.
*
* @author amajumdar
* @since 1.1.x
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class PartitionSequenceIds {
// id of the PARTITIONS table
private Long partId;
// id of the SDS table
private Long sdsId;
// id of the SERDES table
private Long serdeId;
}
| 9,575 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastTableServiceProxy.java
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.commonview.CommonViewHandler;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableWrapper;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.Cacheable;
/**
* Proxy class to get the metadata info from cache if exists.
*/
@CacheConfig(cacheNames = "metacat")
public class HiveConnectorFastTableServiceProxy {
private final IcebergTableHandler icebergTableHandler;
private final HiveConnectorInfoConverter hiveMetacatConverters;
private final CommonViewHandler commonViewHandler;
/**
* Constructor.
*
* @param hiveMetacatConverters hive converter
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
*/
public HiveConnectorFastTableServiceProxy(
final HiveConnectorInfoConverter hiveMetacatConverters,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler
) {
this.hiveMetacatConverters = hiveMetacatConverters;
this.icebergTableHandler = icebergTableHandler;
this.commonViewHandler = commonViewHandler;
}
/**
     * Returns the table metadata from the cache if it exists; otherwise makes the iceberg call to refresh it.
* @param tableName table name
* @param tableMetadataLocation table metadata location
* @param info table info stored in hive metastore
* @param includeInfoDetails if true, will include more details like the manifest file content
* @param useCache true, if table can be retrieved from cache
* @return TableInfo
*/
@Cacheable(key = "'iceberg.table.' + #includeInfoDetails + '.' + #tableMetadataLocation", condition = "#useCache")
public TableInfo getIcebergTable(final QualifiedName tableName,
final String tableMetadataLocation,
final TableInfo info,
final boolean includeInfoDetails,
final boolean useCache) {
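        // The cache key includes the metadata location, so a new table snapshot (new metadata location)
        // naturally bypasses stale cache entries.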
final IcebergTableWrapper icebergTable =
this.icebergTableHandler.getIcebergTable(tableName, tableMetadataLocation, includeInfoDetails);
return this.hiveMetacatConverters.fromIcebergTableToTableInfo(tableName,
icebergTable, tableMetadataLocation, info);
}
/**
     * Returns the common view metadata from the cache if it exists; otherwise makes the common view handler
     * call to refresh it.
* @param name common view name
* @param tableMetadataLocation common view metadata location
* @param info common view info stored in hive metastore
* @param hiveTypeConverter hive type converter
* @param useCache true, if table can be retrieved from cache
* @return TableInfo
*/
@Cacheable(key = "'iceberg.view.' + #tableMetadataLocation", condition = "#useCache")
public TableInfo getCommonViewTableInfo(final QualifiedName name,
final String tableMetadataLocation,
final TableInfo info,
final HiveTypeConverter hiveTypeConverter,
final boolean useCache) {
return commonViewHandler.getCommonViewTableInfo(name, tableMetadataLocation, info, hiveTypeConverter);
}
}
| 9,576 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/TableSequenceIds.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import lombok.Getter;
import javax.annotation.Nullable;
/**
* Class representing the ids for a table.
*
* @author amajumdar
*/
@Getter
public class TableSequenceIds {
private final Long tableId;
private final Long cdId;
private final Long sdsId;
private final Long serdeId;
/**
* Constructor.
* @param tableId table id
* @param cdId column id
*/
public TableSequenceIds(final Long tableId,
final Long cdId) {
this(tableId, cdId, null, null);
}
/**
* Constructor.
* @param tableId table id
* @param cdId column id
* @param sdsId sds id
* @param serdeId serde id
*/
public TableSequenceIds(final Long tableId,
final Long cdId,
@Nullable final Long sdsId,
@Nullable final Long serdeId) {
this.tableId = tableId;
this.cdId = cdId;
this.sdsId = sdsId;
this.serdeId = serdeId;
}
}
| 9,577 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Connector implementation using direct sql calls.
*
* @author amajumdar
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.sql;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,578 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/test/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/test/java/com/netflix/metacat/connector/jdbc/services/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Tests for services.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.jdbc.services;
| 9,579 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/JdbcExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.SQLException;
/**
* An interface to map JDBC SQLExceptions to Metacat Connector Exceptions.
*
* @author tgianos
* @author zhenl
* @see ConnectorException
* @see SQLException
* @since 1.0.0
*/
public interface JdbcExceptionMapper {
/**
     * Converts a JDBC SQLException to a Metacat ConnectorException.
*
* @param se The sql exception to map
* @param name The qualified name of the resource that was attempting to be accessed when the exception occurred
* @return A best attempt at a corresponding connector exception or generic with the SQLException as the cause
*/
default ConnectorException toConnectorException(
@NonNull @Nonnull final SQLException se,
@Nonnull @NonNull final QualifiedName name
) {
return new ConnectorException(se.getMessage(), se);
}
}
| 9,580 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/JdbcTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import com.netflix.metacat.common.type.VarcharType;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Type converter utilities for JDBC connectors.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public abstract class JdbcTypeConverter implements ConnectorTypeConverter {
private static final Pattern TYPE_PATTERN = Pattern.compile(
"^\\s*?"
+ "(\\w+(?:\\s(?:precision|varying))?)" // group 0
+ "\\s*?"
+ "(?:\\(\\s*?(\\d+)(?:\\s*?,\\s*?(\\d+))?\\s*?\\))?" // group 1 and 2
+ "\\s*?"
+ "(\\[\\](?:\\[\\])?)?" // group 3
+ "(?:\\s*?(\\w+(?:\\s\\w+)*))?$" // group 4
);
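    // For example, splitType("decimal(10, 2) unsigned") yields {"decimal", "10", "2", null, "unsigned"}.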
protected String[] splitType(final String type) {
final Matcher matcher = TYPE_PATTERN.matcher(type);
final int numGroups = matcher.groupCount();
if (matcher.find()) {
final String[] split = new String[numGroups];
for (int i = 0; i < numGroups; i++) {
split[i] = matcher.group(i + 1);
}
return split;
} else {
throw new IllegalArgumentException("Unable to parse " + type);
}
}
protected Type toMetacatBitType(@Nonnull final String[] bit) {
        // No size parameter, or a single bit, maps to a boolean
if (bit[1] == null || Integer.parseInt(bit[1]) == 1) {
return BaseType.BOOLEAN;
} else {
final int bytes = (int) Math.ceil(Double.parseDouble(bit[1]) / 8.0);
return VarbinaryType.createVarbinaryType(bytes);
}
}
protected DecimalType toMetacatDecimalType(@Nonnull final String[] splitType) {
if (splitType[1] == null && splitType[2] == null) {
return DecimalType.createDecimalType();
} else if (splitType[1] != null) {
final int precision = Integer.parseInt(splitType[1]);
if (splitType[2] == null) {
return DecimalType.createDecimalType(precision);
} else {
return DecimalType.createDecimalType(precision, Integer.parseInt(splitType[2]));
}
} else {
throw new IllegalArgumentException("Illegal definition of a decimal type: " + Arrays.toString(splitType));
}
}
protected Type toMetacatCharType(@Nonnull final String[] splitType) {
if (splitType[1] == null) {
throw new IllegalArgumentException("Must have size for char type");
}
final int size = Integer.parseInt(splitType[1]);
// Check if we're dealing with binary or not
if (splitType[4] != null) {
if (!splitType[4].equals("binary")) {
throw new IllegalArgumentException(
"Unrecognized extra field in char type: " + splitType[4] + ". Expected 'binary'."
);
}
return VarbinaryType.createVarbinaryType(size);
} else {
return CharType.createCharType(size);
}
}
protected Type toMetacatVarcharType(@Nonnull final String[] splitType) {
if (splitType[1] == null) {
throw new IllegalArgumentException("Must have size for varchar type");
}
final int size = Integer.parseInt(splitType[1]);
// Check if we're dealing with binary or not
if (splitType[4] != null) {
if (!splitType[4].equals("binary")) {
throw new IllegalArgumentException(
"Unrecognized extra field in varchar type: " + splitType[4] + ". Expected 'binary'."
);
}
return VarbinaryType.createVarbinaryType(size);
} else {
return VarcharType.createVarcharType(size);
}
}
protected VarbinaryType toMetacatVarbinaryType(@Nonnull final String[] splitType) {
if (!splitType[0].equals("varbinary") && !splitType[0].equals("binary")) {
// Blob
return VarbinaryType.createVarbinaryType(Integer.MAX_VALUE);
}
if (splitType[1] == null) {
throw new IllegalArgumentException("Must have size for varbinary type");
}
return VarbinaryType.createVarbinaryType(Integer.parseInt(splitType[1]));
}
protected Type toMetacatTimeType(@Nonnull final String[] splitType) {
if (splitType[4] != null && splitType[4].equals("with time zone")) {
return BaseType.TIME_WITH_TIME_ZONE;
} else {
return BaseType.TIME;
}
}
protected Type toMetacatTimestampType(@Nonnull final String[] splitType) {
if (splitType[4] != null && splitType[4].equals("with time zone")) {
return BaseType.TIMESTAMP_WITH_TIME_ZONE;
} else {
return BaseType.TIMESTAMP;
}
}
}
| 9,581 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes related to getting metadata from a generic JDBC connection.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.jdbc;
| 9,582 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
/**
* Generic JDBC implementation of the ConnectorDatabaseService.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
@Getter
public class JdbcConnectorDatabaseService implements ConnectorDatabaseService {
private final DataSource dataSource;
private final JdbcExceptionMapper exceptionMapper;
/**
* Constructor.
*
* @param dataSource The jdbc datasource instance to use to make connections
* @param exceptionMapper The exception mapper to use
*/
@Inject
public JdbcConnectorDatabaseService(
@Nonnull @NonNull final DataSource dataSource,
@Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
) {
this.dataSource = dataSource;
this.exceptionMapper = exceptionMapper;
}
/**
* {@inheritDoc}
*/
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo resource) {
final String databaseName = resource.getName().getDatabaseName();
log.debug("Beginning to create database {} for request {}", databaseName, context);
try (final Connection connection = this.dataSource.getConnection()) {
JdbcConnectorUtils.executeUpdate(connection, "CREATE DATABASE " + databaseName);
log.debug("Finished creating database {} for request {}", databaseName, context);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, resource.getName());
}
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
log.debug("Beginning to drop database {} for request {}", databaseName, context);
try (final Connection connection = this.dataSource.getConnection()) {
JdbcConnectorUtils.executeUpdate(connection, "DROP DATABASE " + databaseName);
log.debug("Finished dropping database {} for request {}", databaseName, context);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
log.debug("Beginning to get database metadata for {} for request {}", databaseName, context);
return DatabaseInfo.builder().name(name).build();
}
/**
* {@inheritDoc}
*/
@Override
public List<DatabaseInfo> list(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String catalogName = name.getCatalogName();
log.debug("Beginning to list database metadata for catalog {} for request {}", catalogName, context);
final ImmutableList.Builder<DatabaseInfo> builder = ImmutableList.builder();
for (final QualifiedName dbName : this.listNames(context, name, prefix, sort, pageable)) {
builder.add(this.get(context, dbName));
}
log.debug("Finished listing database metadata for catalog {} for request {}", catalogName, context);
return builder.build();
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String catalogName = name.getCatalogName();
log.debug("Beginning to list database names for catalog {} for request {}", catalogName, context);
try (final Connection connection = this.dataSource.getConnection()) {
final DatabaseMetaData metaData = connection.getMetaData();
final List<QualifiedName> names = Lists.newArrayList();
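            // When a prefix is given, it is turned into a schema pattern for DatabaseMetaData#getSchemas by
            // appending the multi-character SQL wildcard (presumably "%") defined in JdbcConnectorUtils.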
try (final ResultSet schemas = prefix == null || StringUtils.isEmpty(prefix.getDatabaseName())
? metaData.getSchemas(connection.getCatalog(), null)
: metaData
.getSchemas(
connection.getCatalog(),
prefix.getDatabaseName() + JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
)
) {
while (schemas.next()) {
final String schemaName = schemas.getString("TABLE_SCHEM").toLowerCase(Locale.ENGLISH);
// skip internal schemas
if (!schemaName.equals("information_schema")) {
names.add(QualifiedName.ofDatabase(name.getCatalogName(), schemaName));
}
}
}
// Does user want sorting?
if (sort != null) {
// We can only really sort by the database name at this level so ignore SortBy field
final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getDatabaseName);
JdbcConnectorUtils.sort(names, sort, comparator);
}
// Does user want pagination?
final List<QualifiedName> results = JdbcConnectorUtils.paginate(names, pageable);
log.debug("Finished listing database names for catalog {} for request {}", catalogName, context);
return results;
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
}
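/*
 * A minimal usage sketch, not part of the original source: it assumes a configured
 * javax.sql.DataSource and a concrete JdbcExceptionMapper are available, and that a
 * ConnectorRequestContext is supplied by the Metacat connector framework. It reuses the
 * imports already present in this file; "example_catalog" and "example_db" are placeholders.
 */
class JdbcConnectorDatabaseServiceExample {
    static void createAndList(
        final DataSource dataSource,
        final JdbcExceptionMapper mapper,
        final ConnectorRequestContext context
    ) {
        final JdbcConnectorDatabaseService service = new JdbcConnectorDatabaseService(dataSource, mapper);
        // Issues "CREATE DATABASE example_db" against the underlying connection.
        final QualifiedName dbName = QualifiedName.ofDatabase("example_catalog", "example_db");
        service.create(context, DatabaseInfo.builder().name(dbName).build());
        // Lists schema names in the catalog; prefix, sort and pagination are all optional (nullable).
        for (final QualifiedName listed : service.listNames(context, dbName, null, null, null)) {
            System.out.println(listed.getDatabaseName());
        }
    }
}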
| 9,583 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorTableService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.List;
/**
* Generic JDBC implementation of the ConnectorTableService.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
@Getter
public class JdbcConnectorTableService implements ConnectorTableService {
@SuppressFBWarnings
protected static final String[] TABLE_TYPES = {"TABLE", "VIEW"};
static final String[] TABLE_TYPE = {"TABLE"};
private static final String EMPTY = "";
private static final String COMMA_SPACE = ", ";
private static final String UNSIGNED = "unsigned";
private static final String ZERO = "0";
private static final char LEFT_PAREN = '(';
private static final char RIGHT_PAREN = ')';
private static final char SPACE = ' ';
protected final DataSource dataSource;
protected final JdbcExceptionMapper exceptionMapper;
private final JdbcTypeConverter typeConverter;
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public JdbcConnectorTableService(
@Nonnull @NonNull final DataSource dataSource,
@Nonnull @NonNull final JdbcTypeConverter typeConverter,
@Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
) {
this.dataSource = dataSource;
this.typeConverter = typeConverter;
this.exceptionMapper = exceptionMapper;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
final String tableName = name.getTableName();
log.debug("Attempting to delete table {} from database {} for request {}", tableName, databaseName, context);
try (Connection connection = this.getConnection(name.getDatabaseName())) {
JdbcConnectorUtils.executeUpdate(connection, this.getDropTableSql(name, tableName));
log.debug("Deleted table {} from database {} for request {}", tableName, databaseName, context);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Beginning to get table metadata for qualified name {} for request {}", name, context);
try (Connection connection = this.getConnection(name.getDatabaseName())) {
final ImmutableList.Builder<FieldInfo> fields = ImmutableList.builder();
try (ResultSet columns = this.getColumns(connection, name)) {
while (columns.next()) {
final String type = columns.getString("TYPE_NAME");
final String size = columns.getString("COLUMN_SIZE");
final String precision = columns.getString("DECIMAL_DIGITS");
final String sourceType = this.buildSourceType(type, size, precision);
final FieldInfo.FieldInfoBuilder fieldInfo = FieldInfo.builder()
.name(columns.getString("COLUMN_NAME"))
.sourceType(sourceType)
.type(this.typeConverter.toMetacatType(sourceType))
.comment(columns.getString("REMARKS"))
.isNullable(columns.getString("IS_NULLABLE").equals("YES"))
.defaultValue(columns.getString("COLUMN_DEF"));
if (size != null) {
fieldInfo.size(Integer.parseInt(size));
}
fields.add(fieldInfo.build());
}
}
final List<FieldInfo> fieldInfos = fields.build();
// If table does not exist, throw TableNotFoundException.
if (fieldInfos.isEmpty() && !exists(context, name)) {
throw new TableNotFoundException(name);
}
// Set table details
final TableInfo result = TableInfo.builder().name(name).fields(fieldInfos).build();
setTableInfoDetails(connection, result);
log.debug("Finished getting table metadata for qualified name {} for request {}", name, context);
return result;
} catch (final SQLException se) {
throw new ConnectorException(se.getMessage(), se);
}
}
/**
* Set the table info details, if any.
*
* @param connection db connection
* @param tableInfo table info
*/
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
}
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
log.debug("Beginning to list table metadata for {} for request {}", name, context);
final ImmutableList.Builder<TableInfo> builder = ImmutableList.builder();
for (final QualifiedName tableName : this.listNames(context, name, prefix, sort, pageable)) {
builder.add(this.get(context, tableName));
}
log.debug("Finished listing table metadata for {} for request {}", name, context);
return builder.build();
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
log.debug("Beginning to list tables names for qualified name {} for request {}", name, context);
final String catalog = name.getCatalogName();
final String database = name.getDatabaseName();
try (Connection connection = this.getConnection(database)) {
final List<QualifiedName> names = Lists.newArrayList();
try (ResultSet tables = this.getTables(connection, name, prefix)) {
while (tables.next()) {
names.add(QualifiedName.ofTable(catalog, database, tables.getString("TABLE_NAME")));
}
}
// Does user want sorting?
if (sort != null) {
final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getTableName);
JdbcConnectorUtils.sort(names, sort, comparator);
}
// Does user want pagination?
final List<QualifiedName> results = JdbcConnectorUtils.paginate(names, pageable);
log.debug("Finished listing tables names for qualified name {} for request {}", name, context);
return results;
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public void rename(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName
) {
final String oldDatabaseName = oldName.getDatabaseName();
final String newDatabaseName = newName.getDatabaseName();
final String oldTableName = oldName.getTableName();
final String newTableName = newName.getTableName();
log.debug(
"Attempting to re-name table {}/{} to {}/{} for request {}",
oldDatabaseName,
oldTableName,
newDatabaseName,
newTableName,
context
);
if (!oldDatabaseName.equals(newDatabaseName)) {
throw new IllegalArgumentException(
"Database names must match and they are " + oldDatabaseName + " and " + newDatabaseName
);
}
try (Connection connection = this.getConnection(oldDatabaseName)) {
connection.setSchema(oldDatabaseName);
JdbcConnectorUtils.executeUpdate(
connection,
this.getRenameTableSql(oldName, oldTableName, newTableName)
);
log.debug(
"Renamed table {}/{} to {}/{} for request {}",
oldDatabaseName,
oldTableName,
newDatabaseName,
newTableName,
context
);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, oldName);
}
}
protected Connection getConnection(@Nonnull @NonNull final String schema) throws SQLException {
final Connection connection = this.dataSource.getConnection();
connection.setSchema(schema);
return connection;
}
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
boolean result = false;
try (Connection connection = this.dataSource.getConnection()) {
final String databaseName = name.getDatabaseName();
connection.setSchema(databaseName);
final DatabaseMetaData metaData = connection.getMetaData();
try (final ResultSet rs = metaData.getTables(databaseName, databaseName, name.getTableName(), TABLE_TYPE)) {
if (rs.next()) {
result = true;
}
}
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
return result;
}
/**
* Get the tables. See {@link java.sql.DatabaseMetaData#getTables(String, String, String, String[]) getTables} for
* expected format of the ResultSet columns.
*
* @param connection The database connection to use
* @param name The qualified name of the database to get tables for
* @param prefix An optional database table name prefix to search for
* @return The result set with columns as described in the getTables method from java.sql.DatabaseMetaData
* @throws SQLException on query error
*/
protected ResultSet getTables(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix
) throws SQLException {
final String database = name.getDatabaseName();
final DatabaseMetaData metaData = connection.getMetaData();
return prefix == null || StringUtils.isEmpty(prefix.getTableName())
? metaData.getTables(database, database, null, TABLE_TYPES)
: metaData
.getTables(
database,
database,
prefix.getTableName() + JdbcConnectorUtils.MULTI_CHARACTER_SEARCH,
TABLE_TYPES
);
}
/**
* Get the columns for a table. See
* {@link java.sql.DatabaseMetaData#getColumns(String, String, String, String) getColumns} for format of the
* ResultSet columns.
*
* @param connection The database connection to use
* @param name The qualified name of the table to get the column descriptions for
* @return The result set of information
* @throws SQLException on query error
*/
protected ResultSet getColumns(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name
) throws SQLException {
final String database = name.getDatabaseName();
final DatabaseMetaData metaData = connection.getMetaData();
return metaData.getColumns(
database,
database,
name.getTableName(),
JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
);
}
/**
* Rebuild a source type definition.
*
* @param type The base type e.g. VARCHAR
* @param size The size if applicable to the {@code type}
* @param precision The precision if applicable to the {@code type} e.g. DECIMAL's
* @return The representation of source type e.g. INTEGER, VARCHAR(50) or DECIMAL(20, 10)
* @throws SQLDataException When a non-null size or precision cannot be parsed as an integer
*/
protected String buildSourceType(
@Nonnull @NonNull final String type,
@Nullable final String size,
@Nullable final String precision
) throws SQLDataException {
if (size != null) {
final int sizeInt;
try {
sizeInt = Integer.parseInt(size);
} catch (final NumberFormatException nfe) {
throw new SQLDataException("Size field could not be converted to integer", nfe);
}
// Make sure if the type is unsigned it's created correctly
final String baseType;
final String afterMagnitude;
final int unsignedIndex = StringUtils.indexOfIgnoreCase(type, UNSIGNED);
if (unsignedIndex != -1) {
baseType = StringUtils.trim(type.substring(0, unsignedIndex));
afterMagnitude = type.substring(unsignedIndex);
} else {
baseType = type;
afterMagnitude = null;
}
if (precision != null) {
final int precisionInt;
try {
precisionInt = Integer.parseInt(precision);
} catch (final NumberFormatException nfe) {
throw new SQLDataException("Precision field could not be converted to integer", nfe);
}
return baseType
+ LEFT_PAREN
+ sizeInt
+ COMMA_SPACE
+ precisionInt
+ RIGHT_PAREN
+ (afterMagnitude != null ? SPACE + afterMagnitude : EMPTY);
} else {
return baseType
+ LEFT_PAREN
+ sizeInt
+ RIGHT_PAREN
+ (afterMagnitude != null ? SPACE + afterMagnitude : EMPTY);
}
} else {
return type;
}
}
/**
* Build the SQL for renaming a table from the components provided. The returned SQL is executed as-is.
* @param oldName The fully qualified name for the current table
* @param finalOldTableName The string for what the current table should be called in the sql
* @param finalNewTableName The string for what the new name of the table should be in the sql
* @return The rename table sql to execute
*/
protected String getRenameTableSql(
final QualifiedName oldName,
final String finalOldTableName,
final String finalNewTableName
) {
return "ALTER TABLE " + finalOldTableName + " RENAME TO " + finalNewTableName;
}
/**
* Get the SQL for dropping the given table.
*
* @param name The fully qualified name of the table
* @param finalTableName The final table name that should be dropped
* @return The SQL to execute to drop the table
*/
protected String getDropTableSql(final QualifiedName name, final String finalTableName) {
return "DROP TABLE " + finalTableName;
}
}
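/*
 * A small illustrative sketch, not part of the original source, showing the source type
 * strings produced by buildSourceType above. The "service" argument would be any concrete
 * JdbcConnectorTableService wired elsewhere; expected outputs are shown in the trailing comments.
 */
class SourceTypeExample {
    static void demo(final JdbcConnectorTableService service) throws SQLDataException {
        System.out.println(service.buildSourceType("VARCHAR", "50", null));      // VARCHAR(50)
        System.out.println(service.buildSourceType("DECIMAL", "20", "10"));      // DECIMAL(20, 10)
        System.out.println(service.buildSourceType("INT UNSIGNED", "10", null)); // INT(10) UNSIGNED
        System.out.println(service.buildSourceType("DATE", null, null));         // DATE
    }
}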
| 9,584 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorPartitionService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
/**
* Generic JDBC implementation of the ConnectorPartitionService.
*
* @author tgianos
* @since 1.0.0
*/
public class JdbcConnectorPartitionService implements ConnectorPartitionService {
}
| 9,585 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorUtils.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
/**
* Utility methods for working with JDBC connections.
*
* @author tgianos
* @since 1.0.0
*/
public final class JdbcConnectorUtils extends ConnectorUtils {
/**
* The string used for multi character search in SQL.
*/
public static final String MULTI_CHARACTER_SEARCH = "%";
/**
* The string used for single character search in SQL.
*/
public static final String SINGLE_CHARACTER_SEARCH = "_";
/**
* Utility class; the constructor is protected to prevent direct instantiation while allowing extension.
*/
protected JdbcConnectorUtils() {
}
/**
* Execute a SQL update statement against the given connection.
*
* @param connection The connection to attempt to execute an update against
* @param sql The sql to execute
* @return The number of rows updated or exception
* @throws SQLException on error during execution of the update to the underlying SQL data store
*/
static int executeUpdate(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final String sql
) throws SQLException {
try (final Statement statement = connection.createStatement()) {
return statement.executeUpdate(sql);
}
}
}
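/*
 * A minimal sketch, not part of the original source: running a DDL statement through the
 * package-private helper above. The table name is purely illustrative and the caller is
 * responsible for the connection's lifecycle.
 */
class JdbcConnectorUtilsExample {
    static int dropExampleTable(final Connection connection) throws SQLException {
        // Returns the JDBC update count (typically 0 for DDL statements).
        return JdbcConnectorUtils.executeUpdate(connection, "DROP TABLE example_table");
    }
}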
| 9,586 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Implementations of the Metacat connector service interfaces for generic JDBC connections.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.jdbc.services;
| 9,587 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/Client.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.guava.GuavaModule;
import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule;
import com.google.common.base.Preconditions;
import com.netflix.metacat.client.api.TagV1;
import com.netflix.metacat.client.module.JacksonDecoder;
import com.netflix.metacat.client.module.JacksonEncoder;
import com.netflix.metacat.client.module.MetacatErrorDecoder;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.client.api.MetacatV1;
import com.netflix.metacat.client.api.MetadataV1;
import com.netflix.metacat.client.api.PartitionV1;
import com.netflix.metacat.client.api.ResolverV1;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import feign.Feign;
import feign.Request;
import feign.RequestInterceptor;
import feign.Retryer;
import feign.jaxrs.JAXRSContract;
import feign.slf4j.Slf4jLogger;
import lombok.extern.slf4j.Slf4j;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSocketFactory;
import java.util.concurrent.TimeUnit;
/**
* Client to communicate with Metacat. This version depends on the Feign library.
*
* @author amajumdar
*/
@Slf4j
public final class Client {
private final MetacatV1 api;
private final Feign.Builder feignBuilder;
private final String host;
private final PartitionV1 partitionApi;
private final MetadataV1 metadataApi;
private final ResolverV1 resolverApi;
private final TagV1 tagApi;
private Client(
final String host,
final feign.Client client,
final feign.Logger.Level logLevel,
final RequestInterceptor requestInterceptor,
final Retryer retryer,
final Request.Options options
) {
final MetacatJsonLocator metacatJsonLocator = new MetacatJsonLocator();
final ObjectMapper mapper = metacatJsonLocator
.getPrettyObjectMapper()
.copy()
.registerModule(new GuavaModule())
.registerModule(new JaxbAnnotationModule());
log.info("Connecting to {}", host);
this.host = host;
feignBuilder = Feign.builder()
.client(client)
.logger(new Slf4jLogger())
.logLevel(logLevel)
.contract(new JAXRSContract())
.encoder(new JacksonEncoder(mapper))
.decoder(new JacksonDecoder(mapper))
.errorDecoder(new MetacatErrorDecoder(metacatJsonLocator))
.requestInterceptor(requestInterceptor)
.retryer(retryer)
.options(options);
api = getApiClient(MetacatV1.class);
partitionApi = getApiClient(PartitionV1.class);
metadataApi = getApiClient(MetadataV1.class);
resolverApi = getApiClient(ResolverV1.class);
tagApi = getApiClient(TagV1.class);
}
/**
* Returns the client builder.
*
* @return Builder to create the metacat client
*/
public static Builder builder() {
return new Builder();
}
/**
* Returns an API instance that conforms to the given API Type that can communicate with the Metacat server.
*
* @param apiType A JAX-RS annotated Metacat interface
* @param <T> API Resource instance
* @return An instance that implements the given interface and is wired up to communicate with the Metacat server.
*/
public <T> T getApiClient(final Class<T> apiType) {
Preconditions.checkArgument(apiType.isInterface(), "apiType must be an interface");
return feignBuilder.target(apiType, host);
}
/**
* Return an API instance that can be used to interact with the metacat server.
*
* @return An instance api conforming to MetacatV1 interface
*/
public MetacatV1 getApi() {
return api;
}
/**
* Return an API instance that can be used to interact with the metacat server for partitions.
*
* @return An instance api conforming to PartitionV1 interface
*/
public PartitionV1 getPartitionApi() {
return partitionApi;
}
/**
* Return an API instance that can be used to interact with the metacat server for only user metadata.
*
* @return An instance api conforming to MetadataV1 interface
*/
public MetadataV1 getMetadataApi() {
return metadataApi;
}
/**
* Return an API instance that can be used to interact with
* the metacat server for getting the qualified name by uri.
*
* @return An instance api conforming to ResolverV1 interface
*/
public ResolverV1 getResolverApi() {
return resolverApi;
}
/**
* Return an API instance that can be used to interact with
* the metacat server for tagging metadata.
* @return An instance api conforming to TagV1 interface
*/
public TagV1 getTagApi() {
return tagApi;
}
/**
* Builder class to build the metacat client.
*/
public static class Builder {
private String host;
private String userName;
private feign.Client client;
private String clientAppName;
private String jobId;
private String dataTypeContext;
private feign.Logger.Level logLevel;
private Retryer retryer;
private RequestInterceptor requestInterceptor;
private Request.Options requestOptions;
private SSLSocketFactory sslSocketFactory;
private HostnameVerifier hostnameVerifier;
/**
* Sets the SSLSocketFactory. This field is ignored when the full Feign client is specified.
*
* @param sslFactory the SSLSocketFactory
* @return Builder
*/
public Builder withSSLSocketFactory(final SSLSocketFactory sslFactory) {
this.sslSocketFactory = sslFactory;
return this;
}
/**
* Sets the HostnameVerifier. This field is ignored when the full Feign client is specified.
*
* @param hostVerifier the HostnameVerifier
* @return Builder
*/
public Builder withHostnameVerifier(final HostnameVerifier hostVerifier) {
this.hostnameVerifier = hostVerifier;
return this;
}
/**
* Sets the log level for the client.
*
* @param clientLogLevel log level
* @return Builder
*/
public Builder withLogLevel(final feign.Logger.Level clientLogLevel) {
this.logLevel = clientLogLevel;
return this;
}
/**
* Sets the server host name.
*
* @param serverHost server host to connect
* @return Builder
*/
public Builder withHost(final String serverHost) {
this.host = serverHost;
return this;
}
/**
* Sets the retryer logic for the client.
*
* @param clientRetryer retry implementation
* @return Builder
*/
public Builder withRetryer(final Retryer clientRetryer) {
this.retryer = clientRetryer;
return this;
}
/**
* Sets the user name to pass in the request header.
*
* @param requestUserName user name
* @return Builder
*/
public Builder withUserName(final String requestUserName) {
this.userName = requestUserName;
return this;
}
/**
* Sets the application name to pass in the request header.
*
* @param appName application name
* @return Builder
*/
public Builder withClientAppName(final String appName) {
this.clientAppName = appName;
return this;
}
/**
* Sets the job id to pass in the request header.
*
* @param clientJobId job id
* @return Builder
*/
public Builder withJobId(final String clientJobId) {
this.jobId = clientJobId;
return this;
}
/**
* Sets the Client implementation to use.
*
* @param feignClient Feign Client
* @return Builder
*/
public Builder withClient(final feign.Client feignClient) {
this.client = feignClient;
return this;
}
/**
* Sets the data type context to pass in the request header.
*
* @param requestDataTypeContext Data type context
* @return Builder
*/
public Builder withDataTypeContext(final String requestDataTypeContext) {
this.dataTypeContext = requestDataTypeContext;
return this;
}
/**
* Sets the request interceptor.
*
* @param clientRequestInterceptor request interceptor
* @return Builder
*/
public Builder withRequestInterceptor(final RequestInterceptor clientRequestInterceptor) {
this.requestInterceptor = clientRequestInterceptor;
return this;
}
/**
* Sets the request options.
*
* @param clientRequestOptions request options
* @return Builder
*/
public Builder withRequestOptions(final Request.Options clientRequestOptions) {
this.requestOptions = clientRequestOptions;
return this;
}
/**
* Builds the Metacat client.
*
* @return Client that can be used to make metacat API calls.
*/
public Client build() {
Preconditions.checkArgument(userName != null, "User name cannot be null");
Preconditions.checkArgument(clientAppName != null, "Client application name cannot be null");
if (host == null) {
host = System.getProperty("netflix.metacat.host", System.getenv("NETFLIX_METACAT_HOST"));
}
Preconditions.checkArgument(host != null, "Host cannot be null");
if (retryer == null) {
//
// Retry exponentially, starting with a 500ms delay, for up to 3 attempts.
//
retryer = new Retryer.Default(TimeUnit.MILLISECONDS.toMillis(500), TimeUnit.MINUTES.toMillis(2), 3);
}
final RequestInterceptor interceptor = template -> {
template.header(MetacatRequestContext.HEADER_KEY_USER_NAME, userName);
template.header(MetacatRequestContext.HEADER_KEY_CLIENT_APP_NAME, clientAppName);
template.header(MetacatRequestContext.HEADER_KEY_JOB_ID, jobId);
template.header(MetacatRequestContext.HEADER_KEY_DATA_TYPE_CONTEXT, dataTypeContext);
if (requestInterceptor != null) {
requestInterceptor.apply(template);
}
};
if (requestOptions == null) {
//
// connection timeout: 30 seconds, socket read timeout: 60 seconds
//
requestOptions = new Request.Options((int) TimeUnit.SECONDS.toMillis(30),
(int) TimeUnit.MINUTES.toMillis(1));
}
if (logLevel == null) {
logLevel = feign.Logger.Level.BASIC;
}
if (client == null) {
client = new feign.Client.Default(sslSocketFactory, hostnameVerifier);
}
return new Client(host, client, logLevel, interceptor, retryer, requestOptions);
}
}
}
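/*
 * A minimal construction sketch, not part of the original source. The host URL, user name
 * and application name are placeholders; build() requires at least a user name, a client
 * application name and a host (the latter may also come from the "netflix.metacat.host"
 * system property or the NETFLIX_METACAT_HOST environment variable).
 */
class ClientExample {
    static MetacatV1 createApi() {
        final Client client = Client.builder()
            .withHost("http://localhost:8080")
            .withUserName("example-user")
            .withClientAppName("example-app")
            .build();
        return client.getApi();
    }
}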
| 9,588 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Client library for Metacat.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.client;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,589 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonDecoder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.module;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.RuntimeJsonMappingException;
import feign.Response;
import feign.codec.Decoder;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.Reader;
import java.lang.reflect.Type;
import java.net.HttpURLConnection;
/**
* Decoder for Metacat response.
*
* @author amajumdar
*/
public class JacksonDecoder implements Decoder {
private static final String NO_CONTENT_MESSAGE = "No content to map due to end-of-input";
private final ObjectMapper mapper;
/**
* Constructor.
*
* @param mapper Jackson mapper for Metacat response.
*/
public JacksonDecoder(@Nonnull @NonNull final ObjectMapper mapper) {
this.mapper = mapper;
}
/**
* {@inheritDoc}
*/
@Override
public Object decode(final Response response, final Type type) throws IOException {
if (
response.status() == HttpURLConnection.HTTP_NO_CONTENT
|| response.body() == null
|| (response.body().length() != null && response.body().length() == 0)
) {
return null;
}
try (final Reader reader = response.body().asReader()) {
return this.mapper.readValue(reader, this.mapper.constructType(type));
} catch (final JsonMappingException jme) {
// Handles the case where, for whatever reason (most likely bad design), the server returned OK
// but there was no content to de-serialize (i.e. the return status should have been no-content).
if (response.status() == HttpURLConnection.HTTP_OK
&& jme.getMessage().startsWith(NO_CONTENT_MESSAGE)) {
return null;
}
throw jme;
} catch (final RuntimeJsonMappingException e) {
if (e.getCause() != null && e.getCause() instanceof IOException) {
throw IOException.class.cast(e.getCause());
}
throw e;
}
}
}
| 9,590 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/MetacatErrorDecoder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.module;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Strings;
import com.netflix.metacat.common.exception.MetacatAlreadyExistsException;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.exception.MetacatPreconditionFailedException;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.exception.MetacatUnAuthorizedException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonException;
import feign.Response;
import feign.RetryableException;
import feign.Util;
import lombok.AllArgsConstructor;
import java.io.IOException;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Date;
/**
* Module that provides a error decoder, used to parse errors.
*
* @author amajumdar
*/
@AllArgsConstructor
public class MetacatErrorDecoder extends feign.codec.ErrorDecoder.Default {
private final MetacatJson metacatJson;
/**
* {@inheritDoc}
*/
@Override
public Exception decode(final String methodKey, final Response response) {
try {
String message = "";
if (response.body() != null) {
message = Util.toString(response.body().asReader());
try {
final ObjectNode body = metacatJson.parseJsonObject(message);
message = body.path("error").asText();
if (Strings.isNullOrEmpty(message)) {
message = body.path("message").asText("No error message supplied.");
}
} catch (final MetacatJsonException ignored) {
}
}
switch (response.status()) {
case 501: //NOT IMPLEMENTED
case 415: //UNSUPPORTED_MEDIA_TYPE
return new MetacatNotSupportedException(message);
case 400: //BAD_REQUEST
return new MetacatBadRequestException(message);
case 403: //Forbidden
return new MetacatUnAuthorizedException(message);
case 404: //NOT_FOUND
return new MetacatNotFoundException(message);
case 409: //CONFLICT
return new MetacatAlreadyExistsException(message);
case 412: // PRECONDITION_FAILED
return new MetacatPreconditionFailedException(message);
case 429:
return new RetryableException(response.status(), message,
response.request() == null ? null : response.request().httpMethod(),
new MetacatTooManyRequestsException(message),
Date.from(Instant.now().plus(1, ChronoUnit.MINUTES)), response.request());
case 500: //INTERNAL_SERVER_ERROR
case 503: //SERVICE_UNAVAILABLE
return new RetryableException(response.status(), message,
response.request() == null ? null : response.request().httpMethod(),
new MetacatException(message), null, response.request());
default:
return new MetacatException(message);
}
} catch (final IOException e) {
return super.decode(methodKey, response);
}
}
}
| 9,591 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonEncoder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.module;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import feign.RequestTemplate;
import feign.codec.EncodeException;
import feign.codec.Encoder;
import java.lang.reflect.Type;
/**
* Encoder for Metacat request.
*
* @author amajumdar
*/
public class JacksonEncoder implements Encoder {
private final ObjectMapper mapper;
/**
* Constructor.
*
* @param mapper Jackson mapper for Metacat request
*/
public JacksonEncoder(final ObjectMapper mapper) {
this.mapper = mapper;
}
/**
* Converts objects to an appropriate representation in the template.
*
* @param object what to encode as the request body.
* @param bodyType the type the object should be encoded as. {@code Map<String, ?>}, if form
* encoding.
* @param template the request template to populate.
* @throws feign.codec.EncodeException when encoding failed due to a checked exception.
*/
@Override
public void encode(final Object object, final Type bodyType, final RequestTemplate template)
throws EncodeException {
try {
template.body(mapper.writeValueAsString(object));
} catch (JsonProcessingException e) {
throw new EncodeException(e.getMessage(), e);
}
}
}
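/*
 * A minimal wiring sketch, not part of the original source, mirroring how Client plugs the
 * Jackson encoder/decoder and the Metacat error decoder into a Feign builder. It assumes the
 * same imports Client uses (feign.Feign, feign.jaxrs.JAXRSContract, MetacatV1,
 * MetacatJsonLocator); JacksonDecoder and MetacatErrorDecoder live in this package. The
 * target URL is a placeholder.
 */
class ModuleWiringExample {
    static MetacatV1 wire(final ObjectMapper mapper) {
        return Feign.builder()
            .contract(new JAXRSContract())
            .encoder(new JacksonEncoder(mapper))
            .decoder(new JacksonDecoder(mapper))
            .errorDecoder(new MetacatErrorDecoder(new MetacatJsonLocator()))
            .target(MetacatV1.class, "http://localhost:8080");
    }
}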
| 9,592 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Client Feign related library for Metacat.
*
* @author amajumdar
*/
package com.netflix.metacat.client.module;
| 9,593 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/TagV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TagCreateRequestDto;
import com.netflix.metacat.common.dto.TagRemoveRequestDto;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
import java.util.Set;
/**
* APIs to manipulate the tags.
*
* @author amajumdar
*/
@Path("mds/v1/tag")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface TagV1 {
/**
* Return the list of tags.
*
* @return list of tags
*/
@GET
@Path("tags")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Set<String> getTags();
/**
* Returns the list of qualified names for the given input.
*
* @param includeTags Set of matching tags
* @param excludeTags Set of un-matching tags
* @param sourceName Prefix of the source name
* @param databaseName Prefix of the database name
* @param tableName Prefix of the table name
* @param type Qualified name type category, database, table
* @return list of qualified names
*/
@GET
@Path("list")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> list(
@QueryParam("include")
Set<String> includeTags,
@QueryParam("exclude")
Set<String> excludeTags,
@QueryParam("sourceName")
String sourceName,
@QueryParam("databaseName")
String databaseName,
@QueryParam("tableName")
String tableName,
@QueryParam("type")
QualifiedName.Type type
);
/**
* Returns the list of qualified names that are tagged with tags containing the given tagText.
*
* @param tag Tag partial text
* @param sourceName Prefix of the source name
* @param databaseName Prefix of the database name
* @param tableName Prefix of the table name
* @return list of qualified names
*/
@GET
@Path("search")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> search(
@QueryParam("tag")
String tag,
@QueryParam("sourceName")
String sourceName,
@QueryParam("databaseName")
String databaseName,
@QueryParam("tableName")
String tableName
);
/**
* Sets the tags on the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param tags set of tags
* @return set of tags
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Set<String> setTableTags(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
Set<String> tags
);
/**
* Sets the tags on the given qualified name.
*
* @param tagCreateRequestDto tag create request dto
* @return set of tags
*/
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Set<String> setTags(
TagCreateRequestDto tagCreateRequestDto
);
/**
* Remove the tags on the given qualified name.
*
* @param tagRemoveRequestDto tag remove request dto
*/
@DELETE
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void removeTags(
TagRemoveRequestDto tagRemoveRequestDto
);
/**
* Remove the tags from the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param deleteAll True if all tags need to be removed
* @param tags Tags to be removed from the given table
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void removeTableTags(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@DefaultValue("false")
@QueryParam("all")
Boolean deleteAll,
Set<String> tags
);
}
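/*
 * A minimal usage sketch, not part of the original source: tagging a table through a
 * previously built client (see the Client class above). Catalog, database, table and tag
 * names are placeholders.
 */
class TagV1Example {
    static Set<String> tagTable(final com.netflix.metacat.client.Client client) {
        final TagV1 tagApi = client.getTagApi();
        // Replaces the tags on the given table with the supplied set and returns the result.
        return tagApi.setTableTags("example_catalog", "example_db", "example_table",
            java.util.Collections.singleton("audited"));
    }
}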
| 9,594 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/SearchMetacatV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.dto.TableDto;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
/**
* Search APIs for metacat that queries the search store.
* @author amajumdar
*/
@Path("mds/v1/search")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface SearchMetacatV1 {
/**
* Searches the list of tables for the given search string.
* @param searchString search string
* @return list of tables
*/
@GET
@Path("table")
@Consumes(MediaType.APPLICATION_JSON)
List<TableDto> searchTables(
@QueryParam("q")
String searchString
);
}
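/*
 * A minimal sketch, not part of the original source. The Client shown earlier does not expose
 * a dedicated getter for this interface, but its generic getApiClient method can target it;
 * the search string is a placeholder.
 */
class SearchExample {
    static List<TableDto> findTables(final com.netflix.metacat.client.Client client) {
        final SearchMetacatV1 searchApi = client.getApiClient(SearchMetacatV1.class);
        return searchApi.searchTables("example");
    }
}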
| 9,595 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/PartitionV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.SortOrder;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
/**
* Metacat API for managing partition.
*
* @author amajumdar
*/
@Path("mds/v1/partition")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface PartitionV1 {
/**
* Delete named partitions from a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param partitionIds list of partition names
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deletePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
List<String> partitionIds
);
/**
* Delete partitions for the given view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName metacat view name
* @param partitionIds list of partition names
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deletePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
List<String> partitionIds
);
/**
* Return list of partitions for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @return list of partitions for a table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata
);
/**
* Return list of partitions for a metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @return list of partitions for a metacat view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata
);
/**
* Return list of partitions for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @param getPartitionsRequestDto request
* @return list of partitions for a table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitionsForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partitions for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @param getPartitionsRequestDto request
* @return list of partitions for a view
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitionsForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition names for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition names for a table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeys(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition names for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition names for a view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeys(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition names for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition names for a table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeysForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition names for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition names for a view
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeysForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition uris for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition uris for a table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUris(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition uris for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition uris for a view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUris(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition uris for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition uris for a table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUrisForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition uris for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition uris for a view
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUrisForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Add/update partitions to the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param partitionsSaveRequestDto partition request containing the list of partitions to be added/updated
* @return Response with the number of partitions added/updated
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
PartitionsSaveResponseDto savePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
PartitionsSaveRequestDto partitionsSaveRequestDto
);
/**
* Add/update partitions to the given metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param partitionsSaveRequestDto partition request containing the list of partitions to be added/updated
* @return Response with the number of partitions added/updated
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
PartitionsSaveResponseDto savePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
PartitionsSaveRequestDto partitionsSaveRequestDto
);
/**
* Get the partition count for the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return partition count for the given table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/count")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Integer getPartitionCount(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Get the partition count for the given metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return partition count for the given view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/count")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Integer getPartitionCount(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName
);
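// Illustrative usage sketch (not part of the original interface). The proxy below and
// the catalog/database/table names are assumptions; in practice a JAX-RS or Feign
// client bound to this interface would be obtained from the Metacat client library.
//
//   PartitionV1 partitionApi = ...; // hypothetical proxy bound to the partition resource
//   List<String> keys = partitionApi.getPartitionKeys(
//       "prodhive", "default", "my_table",
//       "dateint>=20240101",   // filter
//       null,                  // sortBy
//       SortOrder.ASC,         // sortOrder
//       0,                     // offset
//       100);                  // limit
//   Integer count = partitionApi.getPartitionCount("prodhive", "default", "my_table");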
}
| 9,596 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/MetacatV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.CreateCatalogDto;
import com.netflix.metacat.common.dto.DatabaseCreateRequestDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.HEAD;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
/**
* Metacat API for managing catalog/database/table/mview.
*
* @author amajumdar
*/
@Path("mds/v1")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface MetacatV1 {
/**
* Creates a new catalog.
*
* @param createCatalogDto catalog
*/
@POST
@Path("catalog")
void createCatalog(CreateCatalogDto createCatalogDto);
/**
* Creates the given database in the given catalog.
*
* @param catalogName catalog name
* @param databaseName database name
* @param databaseCreateRequestDto database create request
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void createDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
DatabaseCreateRequestDto databaseCreateRequestDto
);
/**
* Creates a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param table TableDto with table details
* @return created <code>TableDto</code> table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto createTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
TableDto table
);
/**
* Creates a metacat view, i.e. a staging table that can contain partitions referring to the table's partition locations.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param snapshot boolean to snapshot or not
* @param filter filter expression to use
* @return created <code>TableDto</code> mview
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto createMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@DefaultValue("false")
@QueryParam("snapshot")
Boolean snapshot,
@QueryParam("filter")
String filter
);
/**
* Deletes the given database from the given catalog.
*
* @param catalogName catalog name
* @param databaseName database name
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deleteDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName
);
/**
* Delete table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return deleted <code>TableDto</code> table.
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto deleteTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Delete metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return deleted <code>TableDto</code> mview.
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto deleteMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName
);
/**
* Get the catalog by name.
*
* @param catalogName catalog name
* @return catalog
*/
@GET
@Path("catalog/{catalog-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
CatalogDto getCatalog(
@PathParam("catalog-name")
String catalogName
);
/**
* List registered catalogs.
*
* @return registered catalogs.
*/
@GET
@Path("catalog")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<CatalogMappingDto> getCatalogNames();
/**
* Get the database with the list of table names under it.
*
* @param catalogName catalog name
* @param databaseName database name
* @param includeUserMetadata true if details should include user metadata
* @param includeTableNames if true, include the list of table names
* @return database with details
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
DatabaseDto getDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@DefaultValue("true")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata,
@DefaultValue("true")
@QueryParam("includeTableNames")
Boolean includeTableNames
);
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
* @param includeDefinitionMetadata true if the definition metadata to be included
* @param includeDataMetadata true if the data metadata to be included
* @return table
*/
default TableDto getTable(
String catalogName,
String databaseName,
String tableName,
Boolean includeInfo,
Boolean includeDefinitionMetadata,
Boolean includeDataMetadata
) {
return getTable(catalogName, databaseName, tableName, includeInfo,
includeDefinitionMetadata, includeDataMetadata, false);
}
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
* @param includeDefinitionMetadata true if the definition metadata to be included
* @param includeDataMetadata true if the data metadata to be included
* @param includeInfoDetails true if the more info details to be included
* @return table
*/
default TableDto getTable(
String catalogName,
String databaseName,
String tableName,
Boolean includeInfo,
Boolean includeDefinitionMetadata,
Boolean includeDataMetadata,
Boolean includeInfoDetails
) {
return getTable(catalogName, databaseName, tableName, includeInfo,
includeDefinitionMetadata, includeDataMetadata, includeInfoDetails, false);
}
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
* @param includeDefinitionMetadata true if the definition metadata to be included
* @param includeDataMetadata true if the data metadata to be included
* @param includeInfoDetails true if the more info details to be included
* @param includeMetadataLocationOnly true if only metadata location needs to be included.
* All other flags are ignored if this is set to true.
* @return table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto getTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@DefaultValue("true")
@QueryParam("includeInfo")
Boolean includeInfo,
@DefaultValue("true")
@QueryParam("includeDefinitionMetadata")
Boolean includeDefinitionMetadata,
@DefaultValue("true")
@QueryParam("includeDataMetadata")
Boolean includeDataMetadata,
@DefaultValue("false")
@QueryParam("includeInfoDetails")
Boolean includeInfoDetails,
@DefaultValue("false")
@QueryParam("includeMetadataLocationOnly")
Boolean includeMetadataLocationOnly
);
/**
* Returns true, if table exists.
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return true, if table exists.
*/
default boolean doesTableExist(String catalogName, String databaseName, String tableName) {
boolean result = true;
try {
tableExists(catalogName, databaseName, tableName);
} catch (MetacatNotFoundException e) {
result = false;
}
return result;
}
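// Illustrative usage sketch (assumption: "metacat" is a client proxy bound to this
// interface, and the catalog/database/table names below are placeholders). It shows
// the existence-check-then-fetch pattern enabled by the default methods above:
//
//   MetacatV1 metacat = ...;
//   if (metacat.doesTableExist("prodhive", "default", "my_table")) {
//       TableDto dto = metacat.getTable("prodhive", "default", "my_table",
//           true,    // includeInfo
//           true,    // includeDefinitionMetadata
//           false);  // includeDataMetadata
//   }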
/**
* Check if the table exists.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
*/
@HEAD
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
void tableExists(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Returns a filtered list of table names.
* @param catalogName catalog name
* @param filter filter expression
* @param limit list size
* @return list of table names
*/
@GET
@Path("catalog/{catalog-name}/table-names")
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> getTableNames(
@PathParam("catalog-name")
final String catalogName,
@QueryParam("filter")
final String filter,
@QueryParam("limit")
Integer limit
);
/**
* Returns a filtered list of table names.
* @param catalogName catalog name
* @param databaseName database name
* @param filter filter expression
* @param limit list size
* @return list of table names
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table-names")
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> getTableNames(
@PathParam("catalog-name")
final String catalogName,
@PathParam("database-name")
final String databaseName,
@QueryParam("filter")
final String filter,
@QueryParam("limit")
Integer limit
);
/**
* List of metacat view names.
*
* @param catalogName catalog name
* @return list of metacat view names.
*/
@GET
@Path("catalog/{catalog-name}/mviews")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<NameDateDto> getMViews(
@PathParam("catalog-name")
String catalogName
);
/**
* List of metacat view names.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return List of metacat view names.
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mviews")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<NameDateDto> getMViews(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Get metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return metacat view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto getMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName
);
/**
* Rename table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param newTableName new table name
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/rename")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void renameTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("newTableName")
String newTableName
);
/**
* Updates an existing catalog.
*
* @param catalogName catalog name
* @param createCatalogDto catalog
*/
@PUT
@Path("catalog/{catalog-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void updateCatalog(
@PathParam("catalog-name")
String catalogName,
CreateCatalogDto createCatalogDto
);
/**
* Updates the given database in the given catalog.
*
* @param catalogName catalog name.
* @param databaseName database name.
* @param databaseUpdateRequestDto database
*/
@PUT
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void updateDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
DatabaseCreateRequestDto databaseUpdateRequestDto
);
/**
* Update metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param table view
* @return updated metacat view
*/
@PUT
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto updateMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
TableDto table
);
/**
* Update table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param table table
* @return table
*/
@PUT
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto updateTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
TableDto table
);
}
| 9,597 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/ResolverV1.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.dto.ResolveByUriRequestDto;
import com.netflix.metacat.common.dto.ResolveByUriResponseDto;
import javax.ws.rs.Consumes;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
* ResolverV1.
*
* @author zhenl
* @since 1.0.0
*/
@Path("mds/v1/resolver")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface ResolverV1 {
/**
* resolveByUri.
*
* @param prefixSearch use prefix search
* @param resolveByUriRequestDto resolveByUriRequest
* @return response from uri search
*/
@POST
ResolveByUriResponseDto resolveByUri(
@DefaultValue("false")
@QueryParam("prefixSearch")
Boolean prefixSearch,
ResolveByUriRequestDto resolveByUriRequestDto);
/**
* isUriUsedMoreThanOnce.
*
* @param prefixSearch use prefix search
* @param resolveByUriRequestDto resolveByUriRequest
* @return response of check if a uri used more than once
*/
@POST
@Path("isUriUsedMoreThanOnce")
Response isUriUsedMoreThanOnce(
@DefaultValue("false")
@QueryParam("prefixSearch")
Boolean prefixSearch,
ResolveByUriRequestDto resolveByUriRequestDto);
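// Illustrative usage sketch (assumptions: "resolver" is a client proxy bound to this
// interface, and the request DTO exposes a settable "uri" property; the accessor name
// shown is assumed and may differ in the actual DTO):
//
//   ResolverV1 resolver = ...;
//   ResolveByUriRequestDto request = new ResolveByUriRequestDto();
//   // request.setUri("s3://bucket/path/to/data");  // assumed accessor
//   ResolveByUriResponseDto response = resolver.resolveByUri(true, request);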
}
| 9,598 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/MetadataV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DataMetadataDto;
import com.netflix.metacat.common.dto.DataMetadataGetRequestDto;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.dto.SortOrder;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
import java.util.Set;
/**
* API to manipulate user metadata.
*
* @author amajumdar
*/
@Path("mds/v1/metadata")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface MetadataV1 {
/**
* Returns the data metadata.
*
* @param metadataGetRequestDto metadata request
* @return data metadata
*/
@POST
@Path("data")
DataMetadataDto getDataMetadata(DataMetadataGetRequestDto metadataGetRequestDto);
/**
* Returns the list of definition metadata.
*
* @param sortBy Sort the list by this value
* @param sortOrder Sorting order to use
* @param offset Offset of the list returned
* @param limit Size of the list
* @param lifetime has lifetime set
* @param type Type of the metadata item. Values: database, table, partition
* @param name Text that matches the name of the metadata (accepts sql wildcards)
* @param dataProperties Set of data property names.
* Filters the returned list that only contains the given property names
* @return list of definition metadata
*/
@GET
@Path("definition/list")
List<DefinitionMetadataDto> getDefinitionMetadataList(
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("lifetime")
Boolean lifetime,
@QueryParam("type")
String type,
@QueryParam("name")
String name,
@QueryParam("data-property")
Set<String> dataProperties
);
/**
* Returns the list of qualified names owned by the given owners.
*
* @param owners set of owners
* @return the list of qualified names owned by the given owners
*/
@GET
@Path("searchByOwners")
List<QualifiedName> searchByOwners(
@QueryParam("owner")
Set<String> owners
);
/**
* Delete the definition metadata for the given name.
*
* @param name Name of definition metadata to be deleted
* @param force If true, deletes the metadata without checking if the database/table/partition exists
*/
@DELETE
@Path("definition")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deleteDefinitionMetadata(
@QueryParam("name")
String name,
@DefaultValue("false")
@QueryParam("force")
Boolean force
);
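// Illustrative usage sketch (assumption: "metadata" is a client proxy bound to this
// interface; the type and name pattern below are placeholders):
//
//   MetadataV1 metadata = ...;
//   List<DefinitionMetadataDto> dtos = metadata.getDefinitionMetadataList(
//       null,                   // sortBy
//       SortOrder.ASC,          // sortOrder
//       0,                      // offset
//       100,                    // limit
//       false,                  // lifetime
//       "table",                // type
//       "prodhive/default/%",   // name (accepts sql wildcards)
//       null);                  // dataProperties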
}
| 9,599 |