index (int64) | repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64)
---|---|---|---|---|
0 | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/MetacatHMSHandler.java |
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.spectator.api.NoopRegistry;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.metrics.Metrics;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionContext;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.RawStoreProxy;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.thrift.TException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
/**
* This is an extension of the HiveMetastore that provides multi-tenancy to the Hive metastore.
*
* @author amajumdar
* @since 1.0.0
*/
public class MetacatHMSHandler extends HiveMetaStore.HMSHandler implements IMetacatHMSHandler {
private Pattern partitionValidationPattern;
private int nextSerialNum;
private final Registry registry;
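// Serial number assigned to each handler thread; used in log messages and as the id passed to the RawStore proxy.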
private ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
@Override
protected synchronized Integer initialValue() {
return nextSerialNum++;
}
};
private final ThreadLocal<RawStore> threadLocalMS =
new ThreadLocal<RawStore>() {
@Override
protected synchronized RawStore initialValue() {
return null;
}
};
private final ThreadLocal<Configuration> threadLocalConf =
new ThreadLocal<Configuration>() {
@Override
protected synchronized Configuration initialValue() {
return null;
}
};
/**
* Constructor.
*
* @param name client name
* @throws MetaException exception
*/
public MetacatHMSHandler(final String name) throws MetaException {
this(name, new HiveConf(HiveMetaStore.HMSHandler.class));
}
/**
* Constructor.
*
* @param name client name
* @param conf hive configurations
* @throws MetaException exception
*/
public MetacatHMSHandler(final String name, final HiveConf conf) throws MetaException {
this(name, conf, new NoopRegistry(), true);
}
/**
* Constructor.
*
* @param name client name
* @param conf hive configurations
* @param registry registry
* @param init initialize if true.
* @throws MetaException exception
*/
public MetacatHMSHandler(final String name, final HiveConf conf, final Registry registry, final boolean init)
throws MetaException {
super(name, conf, init);
this.registry = registry;
final String partitionValidationRegex =
getHiveConf().getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN);
if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
partitionValidationPattern = Pattern.compile(partitionValidationRegex);
} else {
partitionValidationPattern = null;
}
}
@Override
public RawStore getMS() throws MetaException {
RawStore ms = threadLocalMS.get();
if (ms == null) {
ms = newRawStore();
ms.verifySchema();
threadLocalMS.set(ms);
ms = threadLocalMS.get();
}
return ms;
}
@Override
public void setConf(final Configuration conf) {
threadLocalConf.set(conf);
final RawStore ms = threadLocalMS.get();
if (ms != null) {
ms.setConf(conf); // reload if DS related configuration is changed
}
}
@Override
public Configuration getConf() {
Configuration conf = threadLocalConf.get();
if (conf == null) {
conf = new Configuration(getHiveConf());
threadLocalConf.set(conf);
}
return conf;
}
private RawStore newRawStore() throws MetaException {
final Configuration conf = getConf();
final String rawStoreClassName = getHiveConf().getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
LOG.info(String.format("%s: Opening raw store with implemenation class: %s", threadLocalId.get(),
rawStoreClassName));
return RawStoreProxy.getProxy(getHiveConf(), conf, rawStoreClassName, threadLocalId.get());
}
private void logInfo(final String m) {
LOG.info(threadLocalId.get().toString() + ": " + m);
}
private String startFunction(final String function, final String extraLogInfo) {
incrementCounter(function);
logInfo((getIpAddress() == null ? "" : "source:" + getIpAddress() + " ") + function + extraLogInfo);
try {
Metrics.startScope(function);
} catch (IOException e) {
LOG.debug("Exception when starting metrics scope"
+ e.getClass().getName() + " " + e.getMessage(), e);
}
return function;
}
private String startFunction(final String function) {
return startFunction(function, "");
}
private void endFunction(final String function, final boolean successful, final Exception e,
final String inputTableName) {
endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
}
private void endFunction(final String function, final MetaStoreEndFunctionContext context) {
try {
Metrics.endScope(function);
} catch (IOException e) {
LOG.debug("Exception when closing metrics scope" + e);
}
}
private static MetaException newMetaException(final Exception e) {
final MetaException me = new MetaException(e.toString());
me.initCause(e);
return me;
}
private static class PartValEqWrapper {
private Partition partition;
/**
* Constructor.
*
* @param partition partition
*/
PartValEqWrapper(final Partition partition) {
this.partition = partition;
}
@Override
public int hashCode() {
return partition.isSetValues() ? partition.getValues().hashCode() : 0;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null || !(obj instanceof PartValEqWrapper)) {
return false;
}
final Partition p1 = this.partition;
final Partition p2 = ((PartValEqWrapper) obj).partition;
if (!p1.isSetValues() || !p2.isSetValues()) {
return p1.isSetValues() == p2.isSetValues();
}
if (p1.getValues().size() != p2.getValues().size()) {
return false;
}
for (int i = 0; i < p1.getValues().size(); ++i) {
final String v1 = p1.getValues().get(i);
final String v2 = p2.getValues().get(i);
if (!Objects.equals(v1, v2)) {
return false;
}
}
return true;
}
}
/**
* Adds and drops partitions in one transaction.
*
* @param databaseName database name
* @param tableName table name
* @param addParts list of partitions
* @param dropParts list of partition values
* @param deleteData if true, deletes the data
* @return true if successful
* @throws NoSuchObjectException Exception if the table does not exist
* @throws MetaException internal metastore error
* @throws TException any internal exception
*/
@SuppressWarnings({"checkstyle:methodname"})
public boolean add_drop_partitions(final String databaseName,
final String tableName, final List<Partition> addParts,
final List<List<String>> dropParts, final boolean deleteData)
throws NoSuchObjectException, MetaException, TException {
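// A minimal usage sketch (argument values are illustrative, not from this codebase):
//   handler.add_drop_partitions("mydb", "mytable", partitionsToAdd, partitionValuesToDrop, false);
// adds and drops the given partitions against mydb.mytable in a single metastore transaction.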
startFunction("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
if (addParts.size() == 0 && dropParts.size() == 0) {
return true;
}
for (List<String> partVals : dropParts) {
LOG.info("Drop Partition values:" + partVals);
}
for (Partition part : addParts) {
LOG.info("Add Partition values:" + part);
}
boolean ret = false;
Exception ex = null;
try {
ret = addDropPartitionsCore(getMS(), databaseName, tableName, addParts, dropParts, false, null);
} catch (Exception e) {
ex = e;
if (e instanceof MetaException) {
throw (MetaException) e;
} else if (e instanceof InvalidObjectException) {
throw (InvalidObjectException) e;
} else if (e instanceof AlreadyExistsException) {
throw (AlreadyExistsException) e;
} else if (e instanceof NoSuchObjectException) {
throw (NoSuchObjectException) e;
} else {
throw newMetaException(e);
}
} finally {
endFunction("drop_partitions", ret, ex, tableName);
}
return ret;
}
private boolean addDropPartitionsCore(
final RawStore ms, final String databaseName, final String tableName, final List<Partition> addParts,
final List<List<String>> dropParts, final boolean ifNotExists, final EnvironmentContext envContext)
throws MetaException, InvalidObjectException, NoSuchObjectException, AlreadyExistsException,
IOException, InvalidInputException, TException {
logInfo("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
boolean success = false;
Table tbl = null;
// Ensures that the list doesn't have dups, and keeps track of directories we have created.
final Map<PartValEqWrapper, Boolean> addedPartitions = new HashMap<PartValEqWrapper, Boolean>();
final List<Partition> existingParts = new ArrayList<Partition>();
List<Partition> result = null;
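// The drops and adds below run within a single metastore transaction; on failure the transaction is
// rolled back and any partition directories created along the way are deleted in the finally block.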
try {
ms.openTransaction();
tbl = get_table(databaseName, tableName);
if (tbl == null) {
throw new NoSuchObjectException("Unable to add partitions because "
+ "database or table " + databaseName + "." + tableName + " does not exist");
}
// Drop the parts first
dropPartitionsCoreNoTxn(ms, tbl, dropParts);
// Now add the parts
result = addPartitionsCoreNoTxn(ms, tbl, addParts, ifNotExists, addedPartitions, existingParts);
if (!result.isEmpty() && !ms.addPartitions(databaseName, tableName, result)) {
throw new MetaException("Unable to add partitions");
}
success = ms.commitTransaction();
} finally {
if (!success) {
ms.rollbackTransaction();
// Clean up the result of adding partitions
for (Map.Entry<PartValEqWrapper, Boolean> e : addedPartitions.entrySet()) {
if (e.getValue()) {
getWh().deleteDir(new Path(e.getKey().partition.getSd().getLocation()), true);
// we just created this directory - it's not a case of pre-creation, so we nuke
}
}
}
}
return success;
}
private boolean startAddPartition(
final RawStore ms, final Partition part, final boolean ifNotExists) throws MetaException, TException {
MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
partitionValidationPattern);
final boolean doesExist = ms.doesPartitionExist(
part.getDbName(), part.getTableName(), part.getValues());
if (doesExist && !ifNotExists) {
throw new AlreadyExistsException("Partition already exists: " + part);
}
return !doesExist;
}
/**
* Handles the location for a partition being created.
*
* @param tbl Table.
* @param part Partition.
* @return Whether the partition SD location is set to a newly created directory.
*/
private boolean createLocationForAddedPartition(
final Table tbl, final Partition part) throws MetaException {
Path partLocation = null;
String partLocationStr = null;
if (part.getSd() != null) {
partLocationStr = part.getSd().getLocation();
}
if (partLocationStr == null || partLocationStr.isEmpty()) {
// set default location if not specified and this is
// a physical table partition (not a view)
if (tbl.getSd().getLocation() != null) {
partLocation = new Path(tbl.getSd().getLocation(), Warehouse
.makePartName(tbl.getPartitionKeys(), part.getValues()));
}
} else {
if (tbl.getSd().getLocation() == null) {
throw new MetaException("Cannot specify location for a view partition");
}
partLocation = getWh().getDnsPath(new Path(partLocationStr));
}
boolean result = false;
if (partLocation != null) {
part.getSd().setLocation(partLocation.toString());
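// File system calls can be enabled globally via hive.metastore.use.fs.calls (default true) or per table
// through the same key in the table parameters.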
final boolean doFileSystemCalls = getHiveConf().getBoolean("hive.metastore.use.fs.calls", true)
|| (tbl.getParameters() != null && Boolean.parseBoolean(tbl.getParameters()
.getOrDefault("hive.metastore.use.fs.calls", "false")));
if (doFileSystemCalls) {
// Check to see if the directory already exists before calling
// mkdirs() because if the file system is read-only, mkdirs will
// throw an exception even if the directory already exists.
if (!getWh().isDir(partLocation)) {
//
// Added to track the number of partition locations that do not exist before
// adding the partition metadata
registry.counter(HiveMetrics.CounterHivePartitionPathIsNotDir.getMetricName(),
"database", tbl.getDbName(), "table", tbl.getTableName()).increment();
logInfo(String.format("Partition location %s does not exist for table %s",
partLocation, tbl.getTableName()));
if (!getWh().mkdirs(partLocation, true)) {
throw new MetaException(partLocation + " is not a directory or unable to create one");
}
}
result = true;
}
}
return result;
}
private void initializeAddedPartition(
final Table tbl, final Partition part, final boolean madeDir) throws MetaException {
initializeAddedPartition(tbl, new PartitionSpecProxy.SimplePartitionWrapperIterator(part), madeDir);
}
@SuppressFBWarnings
private void initializeAddedPartition(
final Table tbl, final PartitionSpecProxy.PartitionIterator part,
final boolean madeDir) throws MetaException {
// set create time
final long time = System.currentTimeMillis() / 1000;
part.setCreateTime((int) time);
if (part.getParameters() == null || part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
}
// Inherit table properties into partition properties.
final Map<String, String> tblParams = tbl.getParameters();
final String inheritProps = getHiveConf().getVar(HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim();
// Default value is empty string in which case no properties will be inherited.
// * implies all properties need to be inherited
Set<String> inheritKeys = new HashSet<String>(Arrays.asList(inheritProps.split(",")));
if (inheritKeys.contains("*")) {
inheritKeys = tblParams.keySet();
}
for (String key : inheritKeys) {
final String paramVal = tblParams.get(key);
if (null != paramVal) { // add the property only if it exists in table properties
part.putToParameters(key, paramVal);
}
}
}
private List<Partition> addPartitionsCoreNoTxn(
final RawStore ms, final Table tbl, final List<Partition> parts, final boolean ifNotExists,
final Map<PartValEqWrapper, Boolean> addedPartitions, final List<Partition> existingParts)
throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
logInfo("add_partitions");
final String dbName = tbl.getDbName();
final String tblName = tbl.getTableName();
final List<Partition> result = new ArrayList<Partition>();
for (Partition part : parts) {
if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + part);
}
final boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
if (!shouldAdd) {
existingParts.add(part);
LOG.info("Not adding partition " + part + " as it already exists");
continue;
}
final boolean madeDir = createLocationForAddedPartition(tbl, part);
if (addedPartitions.put(new PartValEqWrapper(part), madeDir) != null) {
// Technically, for ifNotExists case, we could insert one and discard the other
// because the first one now "exists", but it seems better to report the problem
// upstream as such a command doesn't make sense.
throw new MetaException("Duplicate partitions in the list: " + part);
}
initializeAddedPartition(tbl, part, madeDir);
result.add(part);
}
return result;
}
private List<Partition> dropPartitionsCoreNoTxn(
final RawStore ms, final Table tbl, final List<List<String>> partsValues)
throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
final List<Partition> deletedPartitions = new ArrayList<Partition>();
Partition part = null;
final String dbName = tbl.getDbName();
final String tblName = tbl.getTableName();
for (List<String> partValues : partsValues) {
part = ms.getPartition(dbName, tblName, partValues);
if (part == null) {
throw new NoSuchObjectException("Partition doesn't exist. "
+ partValues);
}
if (!ms.dropPartition(dbName, tblName, partValues)) {
throw new MetaException("Unable to drop partition");
}
deletedPartitions.add(part);
}
return deletedPartitions;
}
}
| 1,600 |
0 | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/package-info.java |
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes to extend hive metastore.
*
* @author amajumdar
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.metastore;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,601 |
0 | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastPartitionService.java |
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.metacat.connector.hive.util.PartitionUtil;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* HiveConnectorFastPartitionService.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorFastPartitionService extends HiveConnectorPartitionService {
private DirectSqlGetPartition directSqlGetPartition;
private DirectSqlSavePartition directSqlSavePartition;
private Warehouse warehouse;
private Registry registry;
@VisibleForTesting
private IcebergTableHandler icebergTableHandler;
/**
* Constructor.
*
* @param context connector context
* @param metacatHiveClient hive client
* @param warehouse hive warehouse
* @param hiveMetacatConverters hive converter
* @param directSqlGetPartition service to get partitions
* @param directSqlSavePartition service to save partitions
* @param icebergTableHandler iceberg table handler
*/
public HiveConnectorFastPartitionService(
final ConnectorContext context,
final IMetacatHiveClient metacatHiveClient,
final Warehouse warehouse,
final HiveConnectorInfoConverter hiveMetacatConverters,
final DirectSqlGetPartition directSqlGetPartition,
final DirectSqlSavePartition directSqlSavePartition,
final IcebergTableHandler icebergTableHandler
) {
super(context, metacatHiveClient, hiveMetacatConverters);
this.warehouse = warehouse;
this.directSqlGetPartition = directSqlGetPartition;
this.directSqlSavePartition = directSqlSavePartition;
this.registry = context.getRegistry();
this.icebergTableHandler = icebergTableHandler;
}
/**
* Number of partitions for the given table.
*
* @param requestContext request context
* @param tableName table name
* @param tableInfo table info
* @return Number of partitions
*/
@Override
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final TableInfo tableInfo
) {
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
throw new UnsupportedOperationException("IcebergTable Unsupported Operation!");
}
return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
? getIcebergPartitionInfos(tableInfo, partitionsRequest)
: directSqlGetPartition.getPartitions(requestContext, tableName, partitionsRequest);
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
? getIcebergPartitionInfos(tableInfo, partitionsRequest)
.stream().map(info -> info.getName().getPartitionName()).collect(Collectors.toList())
:
directSqlGetPartition.getPartitionKeys(requestContext, tableName, partitionsRequest);
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionUris(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo
) {
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
throw new UnsupportedOperationException("IcebergTable Unsupported Operation!");
}
return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
* getPartitionNames.
*
* @param context connector request context
* @param uris uris
* @param prefixSearch prefixSearch
* @return partition names
*/
@Override
public Map<String, List<QualifiedName>> getPartitionNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
return directSqlGetPartition.getPartitionNames(context, uris, prefixSearch);
}
@Override
protected Map<String, PartitionHolder> getPartitionsByNames(final Table table, final List<String> partitionNames) {
// This is an internal call; always turn off the auditTable processing.
return directSqlGetPartition.getPartitionHoldersByNames(table, partitionNames, true);
}
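/**
* Adds, updates and drops partitions for the given table. When the embedded fast save path is enabled
* (via the hive.use.embedded.sql.save.partitions catalog configuration or table parameter), partition
* locations are created up front and the changes are persisted using direct SQL; otherwise the call is
* delegated to the base implementation.
*
* @param tableQName table qualified name
* @param table hive table
* @param partitionNames list of partition names
* @param addedPartitionInfos partitions to add
* @param existingPartitionHolders existing partitions to update
* @param deletePartitionNames names of partitions to delete
*/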
protected void addUpdateDropPartitions(final QualifiedName tableQName,
final Table table,
final List<String> partitionNames,
final List<PartitionInfo> addedPartitionInfos,
final List<PartitionHolder> existingPartitionHolders,
final Set<String> deletePartitionNames) {
final boolean useHiveFastServiceForSavePartitions = Boolean.parseBoolean(getContext().getConfiguration()
.getOrDefault("hive.use.embedded.sql.save.partitions", "false"))
|| (table.getParameters() != null && Boolean.parseBoolean(table.getParameters()
.getOrDefault("hive.use.embedded.sql.save.partitions", "false")));
if (useHiveFastServiceForSavePartitions) {
final long start = registry.clock().wallTime();
try {
if (!existingPartitionHolders.isEmpty()) {
final List<PartitionInfo> existingPartitionInfos = existingPartitionHolders.stream()
.map(PartitionHolder::getPartitionInfo).collect(Collectors.toList());
copyTableSdToPartitionInfosSd(existingPartitionInfos, table);
createLocationForPartitions(tableQName, existingPartitionInfos, table);
}
copyTableSdToPartitionInfosSd(addedPartitionInfos, table);
createLocationForPartitions(tableQName, addedPartitionInfos, table);
} finally {
registry.timer(registry
.createId(HiveMetrics.TagCreatePartitionLocations.getMetricName()).withTags(tableQName.parts()))
.record(registry.clock().wallTime() - start, TimeUnit.MILLISECONDS);
}
directSqlSavePartition.addUpdateDropPartitions(tableQName, table, addedPartitionInfos,
existingPartitionHolders, deletePartitionNames);
} else {
super.addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos,
existingPartitionHolders, deletePartitionNames);
}
}
private void createLocationForPartitions(final QualifiedName tableQName,
final List<PartitionInfo> partitionInfos, final Table table) {
final boolean doFileSystemCalls = Boolean.parseBoolean(getContext().getConfiguration()
.getOrDefault("hive.metastore.use.fs.calls", "true"))
|| (table.getParameters() != null && Boolean.parseBoolean(table.getParameters()
.getOrDefault("hive.metastore.use.fs.calls", "false")));
partitionInfos.forEach(partitionInfo ->
createLocationForPartition(tableQName, partitionInfo, table, doFileSystemCalls));
}
private void createLocationForPartition(final QualifiedName tableQName,
final PartitionInfo partitionInfo,
final Table table,
final boolean doFileSystemCalls) {
String location = partitionInfo.getSerde().getUri();
Path path = null;
if (StringUtils.isBlank(location)) {
if (table.getSd() == null || table.getSd().getLocation() == null) {
throw new InvalidMetaException(tableQName, null);
}
final String partitionName = partitionInfo.getName().getPartitionName();
final List<String> partValues = PartitionUtil
.getPartValuesFromPartName(tableQName, table, partitionName);
final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
path = new Path(table.getSd().getLocation(), escapedPartName);
} else {
try {
path = warehouse.getDnsPath(new Path(location));
} catch (Exception e) {
throw new InvalidMetaException(String.format("Failed forming partition location; %s", location), e);
}
}
if (path != null) {
location = path.toString();
partitionInfo.getSerde().setUri(location);
if (doFileSystemCalls) {
registry.counter(registry.createId(HiveMetrics.CounterHivePartitionFileSystemCall.getMetricName())
.withTags(tableQName.parts())).increment();
try {
if (!warehouse.isDir(path)) {
//
// Added to track the number of partition locations that do not exist before
// adding the partition metadata
registry.counter(registry.createId(HiveMetrics.CounterHivePartitionPathIsNotDir.getMetricName())
.withTags(tableQName.parts())).increment();
log.info(String.format("Partition location %s does not exist for table %s",
location, tableQName));
if (!warehouse.mkdirs(path, false)) {
throw new InvalidMetaException(String
.format("%s is not a directory or unable to create one", location), null);
}
}
} catch (Exception e) {
throw new InvalidMetaException(String.format("Failed creating partition location; %s", location),
e);
}
}
}
}
private void copyTableSdToPartitionInfosSd(final List<PartitionInfo> partitionInfos, final Table table) {
//
// Update the partition info based on that of the table.
//
for (PartitionInfo partitionInfo : partitionInfos) {
copyTableSdToPartitionInfoSd(partitionInfo, table);
}
}
private void copyTableSdToPartitionInfoSd(final PartitionInfo partitionInfo, final Table table) {
StorageInfo sd = partitionInfo.getSerde();
//
// Partitions can be provided in the request without the storage information.
//
if (sd == null) {
sd = new StorageInfo();
partitionInfo.setSerde(sd);
}
final StorageDescriptor tableSd = table.getSd();
if (StringUtils.isBlank(sd.getInputFormat())) {
sd.setInputFormat(tableSd.getInputFormat());
}
if (StringUtils.isBlank(sd.getOutputFormat())) {
sd.setOutputFormat(tableSd.getOutputFormat());
}
if (sd.getParameters() == null || sd.getParameters().isEmpty()) {
sd.setParameters(tableSd.getParameters());
}
final SerDeInfo tableSerde = tableSd.getSerdeInfo();
if (tableSerde != null) {
if (StringUtils.isBlank(sd.getSerializationLib())) {
sd.setSerializationLib(tableSerde.getSerializationLib());
}
if (sd.getSerdeInfoParameters() == null || sd.getSerdeInfoParameters().isEmpty()) {
sd.setSerdeInfoParameters(tableSerde.getParameters());
}
}
}
/**
* {@inheritDoc}.
*/
@Override
public void deletePartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final List<String> partitionNames,
final TableInfo tableInfo
) {
// TODO: implement as a next step
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
throw new UnsupportedOperationException("IcebergTable Unsupported Operation!");
}
// The direct SQL based deletion doesn't check if the partition is valid.
if (Boolean.parseBoolean(getContext().getConfiguration()
.getOrDefault(HiveConfigConstants.USE_FAST_DELETION, "false"))) {
directSqlSavePartition.delete(tableName, partitionNames);
} else {
//will throw exception if the partitions are invalid
super.deletePartitions(requestContext, tableName, partitionNames, tableInfo);
}
}
/**
* Gets the iceberg table partitions.
*
* @param tableInfo table info
* @param partitionsRequest partition request
* @return list of iceberg partition infos
*/
private List<PartitionInfo> getIcebergPartitionInfos(
final TableInfo tableInfo,
final PartitionListRequest partitionsRequest) {
return ConnectorUtils.paginate(
icebergTableHandler.getPartitions(
tableInfo,
context,
partitionsRequest.getFilter(),
partitionsRequest.getPartitionNames(),
partitionsRequest.getSort()
),
partitionsRequest.getPageable()
);
}
}
| 1,602 |
0 | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastDatabaseService.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
/**
* This class provides the database service using direct sql.
*
* @author amajumdar
* @since 1.3.0
*/
public class HiveConnectorFastDatabaseService extends HiveConnectorDatabaseService {
private final DirectSqlDatabase directSqlDatabase;
/**
* Constructor.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
* @param directSqlDatabase database sql data service
*/
public HiveConnectorFastDatabaseService(final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final DirectSqlDatabase directSqlDatabase) {
super(metacatHiveClient, hiveMetacatConverters);
this.directSqlDatabase = directSqlDatabase;
}
@Override
public void update(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
directSqlDatabase.update(databaseInfo);
}
}
| 1,603 |
0 | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlTable.java |
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to get/set table metadata.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlTable {
/**
* Defines the table type.
*/
public static final String PARAM_TABLE_TYPE = "table_type";
/**
* Defines the current metadata location of the iceberg table.
*/
public static final String PARAM_METADATA_LOCATION = "metadata_location";
/**
* Defines the previous metadata location of the iceberg table.
*/
public static final String PARAM_PREVIOUS_METADATA_LOCATION = "previous_metadata_location";
/**
* Defines the current partition spec expression of the iceberg table.
*/
public static final String PARAM_PARTITION_SPEC = "partition_spec";
/**
* Iceberg table type.
*/
public static final String ICEBERG_TABLE_TYPE = "ICEBERG";
/**
* VIRTUAL_VIEW table type.
*/
public static final String VIRTUAL_VIEW_TABLE_TYPE = "VIRTUAL_VIEW";
/**
* Defines the metadata content of the iceberg table.
*/
public static final String PARAM_METADATA_CONTENT = "metadata_content";
/**
* List of parameters that need to be excluded when updating an iceberg table.
*/
public static final Set<String> TABLE_EXCLUDED_PARAMS =
ImmutableSet.of(PARAM_PARTITION_SPEC, PARAM_METADATA_CONTENT);
private static final String COL_PARAM_KEY = "param_key";
private static final String COL_PARAM_VALUE = "param_value";
private final Registry registry;
private final JdbcTemplate jdbcTemplate;
private final HiveConnectorFastServiceMetric fastServiceMetric;
private final String catalogName;
private final DirectSqlSavePartition directSqlSavePartition;
private final Warehouse warehouse;
private final Config config;
/**
* Constructor.
*
* @param connectorContext server context
* @param jdbcTemplate JDBC template
* @param fastServiceMetric fast service metric
* @param directSqlSavePartition direct sql partition service
* @param warehouse warehouse
*/
public DirectSqlTable(
final ConnectorContext connectorContext,
final JdbcTemplate jdbcTemplate,
final HiveConnectorFastServiceMetric fastServiceMetric,
final DirectSqlSavePartition directSqlSavePartition,
final Warehouse warehouse
) {
this.catalogName = connectorContext.getCatalogName();
this.registry = connectorContext.getRegistry();
this.jdbcTemplate = jdbcTemplate;
this.fastServiceMetric = fastServiceMetric;
this.directSqlSavePartition = directSqlSavePartition;
this.warehouse = warehouse;
this.config = connectorContext.getConfig();
}
/**
* Returns the Jdbc connection of the underlying database.
*
* @return the Jdbc connection of the underlying database
* @throws SQLException if the connection could not be fetched
* @throws NullPointerException if no data source has been configured
*/
@SuppressFBWarnings("NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE")
public Connection getConnection() throws SQLException {
return jdbcTemplate.getDataSource().getConnection();
}
/**
* Returns true if table exists with the given name.
*
* @param name table name
* @return true if table exists with the given name.
*/
@Transactional(readOnly = true)
public boolean exists(final QualifiedName name) {
final long start = registry.clock().wallTime();
boolean result = false;
try {
final Object qResult = jdbcTemplate.queryForObject(SQL.EXIST_TABLE_BY_NAME,
new String[]{name.getDatabaseName(), name.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR}, Integer.class);
if (qResult != null) {
result = true;
}
} catch (EmptyResultDataAccessException e) {
log.debug("Table {} does not exist.", name);
return false;
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagTableExists.getMetricName(), registry.clock().wallTime() - start);
}
return result;
}
/**
* Returns all the table names referring to the given <code>uris</code>.
*
* @param uris locations
* @param prefixSearch if true, we look for tables whose location starts with the given <code>uri</code>
* @return map of uri to list of table names
* @throws UnsupportedOperationException If the connector doesn't implement this method
*/
@Transactional(readOnly = true)
public Map<String, List<QualifiedName>> getTableNames(final List<String> uris, final boolean prefixSearch) {
final long start = registry.clock().wallTime();
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(SQL.GET_TABLE_NAMES_BY_URI);
final List<SqlParameterValue> params = Lists.newArrayList();
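// For prefix search, OR together LIKE clauses (one per uri); otherwise match the exact locations with an IN clause.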
if (prefixSearch) {
queryBuilder.append(" and (1=0");
uris.forEach(uri -> {
queryBuilder.append(" or location like ?");
params.add(new SqlParameterValue(Types.VARCHAR, uri + "%"));
});
queryBuilder.append(" )");
} else {
queryBuilder.append(" and location in (");
uris.forEach(uri -> {
queryBuilder.append("?,");
params.add(new SqlParameterValue(Types.VARCHAR, uri));
});
queryBuilder.deleteCharAt(queryBuilder.length() - 1).append(")");
}
ResultSetExtractor<Map<String, List<QualifiedName>>> handler = rs -> {
final Map<String, List<QualifiedName>> result = Maps.newHashMap();
while (rs.next()) {
final String schemaName = rs.getString("schema_name");
final String tableName = rs.getString("table_name");
final String uri = rs.getString("location");
final List<QualifiedName> names = result.computeIfAbsent(uri, k -> Lists.newArrayList());
names.add(QualifiedName.ofTable(catalogName, schemaName, tableName));
}
return result;
};
try {
return jdbcTemplate.query(queryBuilder.toString(), params.toArray(), handler);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetTableNames.getMetricName(), registry.clock().wallTime() - start);
}
}
/**
* Locks and updates the iceberg table so that no other request can modify it at the same time.
* 1. Gets the table parameters and locks the requested records. If the lock cannot be attained,
* the update request fails.
* 2. Validates the metadata location.
* 3. If validated, updates the table parameters.
* @param tableInfo table info
*/
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void updateIcebergTable(final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
final Map<String, String> newTableMetadata = tableInfo.getMetadata();
//
// Table info should have the table parameters with the metadata location.
//
HiveTableUtil.throwIfTableMetadataNullOrEmpty(tableName, newTableMetadata);
//
// If the previous metadata location is not empty, check if it is valid.
//
final String previousMetadataLocation = newTableMetadata.get(PARAM_PREVIOUS_METADATA_LOCATION);
if (config.isIcebergPreviousMetadataLocationCheckEnabled() && !StringUtils.isBlank(previousMetadataLocation)) {
boolean doesPathExists = true;
try {
final Path previousMetadataPath = new Path(previousMetadataLocation);
doesPathExists = warehouse.getFs(previousMetadataPath).exists(previousMetadataPath);
} catch (Exception ignored) {
log.warn(String.format("Failed getting the filesystem for %s", previousMetadataLocation));
registry.counter(HiveMetrics.CounterFileSystemReadFailure.name()).increment();
}
if (!doesPathExists) {
throw new InvalidMetaException(tableName,
String.format("Invalid metadata for %s..Location %s does not exist",
tableName, previousMetadataLocation), null);
}
}
final Long tableId = getTableId(tableName);
Map<String, String> existingTableMetadata = null;
log.debug("Lock Iceberg table {}", tableName);
try {
existingTableMetadata = jdbcTemplate.query(SQL.TABLE_PARAMS_LOCK,
new SqlParameterValue[]{new SqlParameterValue(Types.BIGINT, tableId)}, rs -> {
final Map<String, String> result = Maps.newHashMap();
while (rs.next()) {
result.put(rs.getString(COL_PARAM_KEY), rs.getString(COL_PARAM_VALUE));
}
return result;
});
} catch (EmptyResultDataAccessException ex) {
log.info(String.format("No parameters defined for iceberg table %s", tableName));
} catch (Exception ex) {
final String message = String.format("Failed getting a lock on iceberg table %s", tableName);
log.warn(message, ex);
throw new InvalidMetaException(tableName, message, null);
}
if (existingTableMetadata == null) {
existingTableMetadata = Maps.newHashMap();
}
final boolean needUpdate = validateIcebergUpdate(tableName, existingTableMetadata, newTableMetadata);
final String existingMetadataLocation = existingTableMetadata.get(PARAM_METADATA_LOCATION);
final String newMetadataLocation = newTableMetadata.get(PARAM_METADATA_LOCATION);
log.info("Servicing Iceberg commit request with tableId: {}, needUpdate: {}, "
+ "previousLocation: {}, existingLocation: {}, newLocation: {}",
tableId, needUpdate, previousMetadataLocation, existingMetadataLocation, newMetadataLocation);
if (needUpdate) {
final MapDifference<String, String> diff = Maps.difference(existingTableMetadata, newTableMetadata);
insertTableParams(tableId, diff.entriesOnlyOnRight());
final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
updateTableParams(tableId, updateParams);
//
// In addition to updating the table params, the table location in HMS needs to be updated for use by
// external tools that access HMS directly.
//
updateTableLocation(tableId, tableInfo);
log.info("Finished updating Iceberg table with tableId: {}", tableId);
}
log.debug("Unlocked Iceberg table {}", tableName);
}
private void validateTableType(final QualifiedName tableName, final Map<String, String> tableMetadata) {
if (!tableMetadata.isEmpty()) {
if (ICEBERG_TABLE_TYPE.equalsIgnoreCase(tableMetadata.get(PARAM_TABLE_TYPE))) {
return;
}
if (MetacatUtils.isCommonView(tableMetadata)) {
return;
}
}
final String message = String.format("Originally table %s is neither iceberg table nor common view", tableName);
log.info(message);
throw new InvalidMetaException(tableName, message, null);
}
private boolean validateIcebergUpdate(final QualifiedName tableName,
final Map<String, String> existingTableMetadata,
final Map<String, String> newTableMetadata) {
// Validate the type of the table stored in the RDS
validateTableType(tableName, existingTableMetadata);
final String existingMetadataLocation = existingTableMetadata.get(PARAM_METADATA_LOCATION);
final String previousMetadataLocation = newTableMetadata.get(PARAM_PREVIOUS_METADATA_LOCATION);
final String newMetadataLocation = newTableMetadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
//
// 1. If stored metadata location is empty then the table is not in a valid state.
// 2. If previous metadata location is not provided then the request is invalid.
// 3. If the provided previous metadata location does not match the saved metadata location, then the table
// update should fail.
//
boolean needUpdate = false;
if (StringUtils.isBlank(existingMetadataLocation)) {
final String message = String
.format("Invalid metadata location for iceberg table %s. Existing location is empty.",
tableName);
log.error(message);
throw new TablePreconditionFailedException(tableName, message, existingMetadataLocation,
previousMetadataLocation);
} else if (!Objects.equals(existingMetadataLocation, newMetadataLocation)) {
if (StringUtils.isBlank(previousMetadataLocation)) {
final String message = String.format(
"Invalid metadata location for iceberg table %s. Provided previous metadata location is empty.",
tableName);
log.error(message);
throw new TablePreconditionFailedException(tableName, message, existingMetadataLocation,
previousMetadataLocation);
} else if (!Objects.equals(existingMetadataLocation, previousMetadataLocation)) {
final String message =
String.format("Invalid metadata location for iceberg table %s (expected:%s, provided:%s)",
tableName, existingMetadataLocation, previousMetadataLocation);
log.error(message);
throw new TablePreconditionFailedException(tableName, message, existingMetadataLocation,
previousMetadataLocation);
}
needUpdate = true;
}
return needUpdate;
}
private void updateTableLocation(final Long tableId, final TableInfo tableInfo) {
final String uri = tableInfo.getSerde() != null ? tableInfo.getSerde().getUri() : null;
if (!Strings.isNullOrEmpty(uri)) {
jdbcTemplate.update(SQL.UPDATE_SDS_LOCATION, new SqlParameterValue(Types.VARCHAR, uri),
new SqlParameterValue(Types.BIGINT, tableId), new SqlParameterValue(Types.VARCHAR, uri));
}
}
private void insertTableParams(final Long tableId, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.filter(s -> !TABLE_EXCLUDED_PARAMS.contains(s.getKey()))
.map(s -> new Object[]{tableId, s.getKey(), s.getValue()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.INSERT_TABLE_PARAMS, paramsList,
new int[]{Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
}
}
private void updateTableParams(final Long tableId, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.filter(s -> !TABLE_EXCLUDED_PARAMS.contains(s.getKey()))
.map(s -> new Object[]{s.getValue(), tableId, s.getKey()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.UPDATE_TABLE_PARAMS, paramsList,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
}
}
/**
* Returns the table internal id.
* @param tableName table name
* @return table id
*/
@Transactional(readOnly = true)
public Long getTableId(final QualifiedName tableName) {
try {
return jdbcTemplate.queryForObject(SQL.GET_TABLE_ID,
new String[]{tableName.getDatabaseName(), tableName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR}, Long.class);
} catch (EmptyResultDataAccessException e) {
throw new TableNotFoundException(tableName);
}
}
/**
* Deletes all the table related information from the store.
* @param tableName table name
*/
public void delete(final QualifiedName tableName) {
try {
final TableSequenceIds ids = getSequenceIds(tableName);
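// Delete the partitions first, then detach and remove the column, serde, privilege and storage
// descriptor rows that reference the table before deleting the table row itself.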
directSqlSavePartition.delete(tableName);
jdbcTemplate.update(SQL.UPDATE_SDS_CD, new SqlParameterValue(Types.BIGINT, null),
new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.UPDATE_SDS_SERDE, new SqlParameterValue(Types.BIGINT, null),
new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
//
// Ignore the error. We should be ignoring the error when the table does not exist.
// In certain hive metastore versions, these tables might not be present.
// TODO: Better handle these non-existent tables.
//
try {
jdbcTemplate.update(SQL.DELETE_COLUMNS_OLD, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
} catch (DataAccessException ignored) {
log.debug("Ignore. Probably table COLUMNS_OLD does not exist.");
}
try {
jdbcTemplate.update(SQL.DELETE_TBL_PRIVS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
} catch (DataAccessException ignored) {
log.debug("Ignore. Probably table TBL_PRIVS does not exist.");
}
try {
jdbcTemplate.update(SQL.DELETE_TBL_COL_PRIVS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
} catch (DataAccessException ignored) {
log.debug("Ignore. Probably table TBL_COL_PRIVS does not exist.");
}
jdbcTemplate.update(SQL.DELETE_COLUMNS_V2, new SqlParameterValue(Types.BIGINT, ids.getCdId()));
jdbcTemplate.update(SQL.DELETE_CDS, new SqlParameterValue(Types.BIGINT, ids.getCdId()));
jdbcTemplate.update(SQL.DELETE_PARTITION_KEYS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.DELETE_TABLE_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.DELETE_TAB_COL_STATS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.UPDATE_TABLE_SD, new SqlParameterValue(Types.BIGINT, null),
new SqlParameterValue(Types.BIGINT, ids.getTableId()));
jdbcTemplate.update(SQL.DELETE_SKEWED_COL_NAMES, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_BUCKETING_COLS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SORT_COLS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SD_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SKEWED_COL_VALUE_LOC_MAP,
new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SKEWED_VALUES, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_SERDE_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getSerdeId()));
jdbcTemplate.update(SQL.DELETE_SERDES, new SqlParameterValue(Types.BIGINT, ids.getSerdeId()));
jdbcTemplate.update(SQL.DELETE_SDS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
jdbcTemplate.update(SQL.DELETE_TBLS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
} catch (DataAccessException e) {
throw new ConnectorException(String.format("Failed delete hive table %s", tableName), e);
}
}
private TableSequenceIds getSequenceIds(final QualifiedName tableName) {
try {
return jdbcTemplate.queryForObject(
SQL.TABLE_SEQUENCE_IDS,
new Object[]{tableName.getDatabaseName(), tableName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR},
(rs, rowNum) -> new TableSequenceIds(rs.getLong("tbl_id"), rs.getLong("cd_id"),
rs.getLong("sd_id"), rs.getLong("serde_id")));
} catch (EmptyResultDataAccessException e) {
throw new TableNotFoundException(tableName);
}
}
@VisibleForTesting
private static class SQL {
static final String GET_TABLE_NAMES_BY_URI =
"select d.name schema_name, t.tbl_name table_name, s.location"
+ " from DBS d, TBLS t, SDS s where d.DB_ID=t.DB_ID and t.sd_id=s.sd_id";
static final String EXIST_TABLE_BY_NAME =
"select 1 from DBS d join TBLS t on d.DB_ID=t.DB_ID where d.name=? and t.tbl_name=?";
static final String GET_TABLE_ID =
"select t.tbl_id from DBS d join TBLS t on d.DB_ID=t.DB_ID where d.name=? and t.tbl_name=?";
static final String TABLE_PARAM_LOCK =
"SELECT param_value FROM TABLE_PARAMS WHERE tbl_id=? and param_key=? FOR UPDATE";
static final String TABLE_PARAMS_LOCK =
"SELECT param_key, param_value FROM TABLE_PARAMS WHERE tbl_id=? FOR UPDATE";
static final String UPDATE_TABLE_PARAMS =
"update TABLE_PARAMS set param_value=? WHERE tbl_id=? and param_key=?";
static final String INSERT_TABLE_PARAMS =
"insert into TABLE_PARAMS(tbl_id,param_key,param_value) values (?,?,?)";
static final String UPDATE_SDS_LOCATION =
"UPDATE SDS s join TBLS t on s.sd_id=t.sd_id SET s.LOCATION=? WHERE t.TBL_ID=? and s.LOCATION != ?";
static final String UPDATE_SDS_CD = "UPDATE SDS SET CD_ID=? WHERE SD_ID=?";
static final String DELETE_COLUMNS_OLD = "DELETE FROM COLUMNS_OLD WHERE SD_ID=?";
static final String DELETE_COLUMNS_V2 = "DELETE FROM COLUMNS_V2 WHERE CD_ID=?";
static final String DELETE_CDS = "DELETE FROM CDS WHERE CD_ID=?";
static final String DELETE_PARTITION_KEYS = "DELETE FROM PARTITION_KEYS WHERE TBL_ID=?";
static final String DELETE_TABLE_PARAMS = "DELETE FROM TABLE_PARAMS WHERE TBL_ID=?";
static final String DELETE_TAB_COL_STATS = "DELETE FROM TAB_COL_STATS WHERE TBL_ID=?";
static final String UPDATE_TABLE_SD = "UPDATE TBLS SET SD_ID=? WHERE TBL_ID=?";
static final String DELETE_SKEWED_COL_NAMES = "DELETE FROM SKEWED_COL_NAMES WHERE SD_ID=?";
static final String DELETE_BUCKETING_COLS = "DELETE FROM BUCKETING_COLS WHERE SD_ID=?";
static final String DELETE_SORT_COLS = "DELETE FROM SORT_COLS WHERE SD_ID=?";
static final String DELETE_SD_PARAMS = "DELETE FROM SD_PARAMS WHERE SD_ID=?";
static final String DELETE_SKEWED_COL_VALUE_LOC_MAP = "DELETE FROM SKEWED_COL_VALUE_LOC_MAP WHERE SD_ID=?";
static final String DELETE_SKEWED_VALUES = "DELETE FROM SKEWED_VALUES WHERE SD_ID_OID=?";
static final String UPDATE_SDS_SERDE = "UPDATE SDS SET SERDE_ID=? WHERE SD_ID=?";
static final String DELETE_SERDE_PARAMS = "DELETE FROM SERDE_PARAMS WHERE SERDE_ID=?";
static final String DELETE_SERDES = "DELETE FROM SERDES WHERE SERDE_ID=?";
static final String DELETE_SDS = "DELETE FROM SDS WHERE SD_ID=?";
static final String DELETE_TBL_PRIVS = "DELETE FROM TBL_PRIVS WHERE TBL_ID=?";
static final String DELETE_TBL_COL_PRIVS = "DELETE FROM TBL_COL_PRIVS WHERE TBL_ID=?";
static final String DELETE_TBLS = "DELETE FROM TBLS WHERE TBL_ID=?";
static final String TABLE_SEQUENCE_IDS = "select t.tbl_id, s.sd_id, s.cd_id, s.serde_id"
+ " from DBS d join TBLS t on d.db_id=t.db_id join SDS s on t.sd_id=s.sd_id"
+ " where d.name=? and t.tbl_name=?";
}
}
| 1,604 |
0 | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive | Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastTableService.java |
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.commonview.CommonViewHandler;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.spectator.api.Registry;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.FileSystem;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
/**
* HiveConnectorFastTableService.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorFastTableService extends HiveConnectorTableService {
private final Registry registry;
@Getter
private final DirectSqlTable directSqlTable;
private final IcebergTableHandler icebergTableHandler;
private final CommonViewHandler commonViewHandler;
private final HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy;
/**
* Constructor.
*
* @param catalogName catalog name
* @param metacatHiveClient hive client
* @param hiveConnectorDatabaseService databaseService
* @param hiveMetacatConverters hive converter
* @param connectorContext serverContext
* @param directSqlTable Table jpa service
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
* @param hiveConnectorFastTableServiceProxy hive connector fast table service proxy
*/
public HiveConnectorFastTableService(
final String catalogName,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final HiveConnectorInfoConverter hiveMetacatConverters,
final ConnectorContext connectorContext,
final DirectSqlTable directSqlTable,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler,
final HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy
) {
super(catalogName, metacatHiveClient, hiveConnectorDatabaseService, hiveMetacatConverters, connectorContext);
this.registry = connectorContext.getRegistry();
this.directSqlTable = directSqlTable;
this.icebergTableHandler = icebergTableHandler;
this.commonViewHandler = commonViewHandler;
this.hiveConnectorFastTableServiceProxy = hiveConnectorFastTableServiceProxy;
}
/**
* Returns the Jdbc connection of the underlying database.
*
* @return the Jdbc connection of the underlying database
* @throws SQLException if the connection could not be fetched
*/
public Connection getConnection() throws SQLException {
return directSqlTable.getConnection();
}
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
try {
super.create(requestContext, tableInfo);
} catch (InvalidMetaException e) {
throw handleException(e);
}
}
private RuntimeException handleException(final RuntimeException e) {
//
// On table creation, hive metastore validates the table location.
// On iceberg table get and update, the iceberg method uses the metadata location.
// On both occasions, FileSystem uses the relevant file system based on the location scheme. We noticed an error
// where the s3 client's connection pool closed abruptly, causing subsequent requests to the s3 client to fail.
// FileSystem caches the file system instances.
// The fix is to clear the FileSystem cache so that it can recreate the file system instances.
//
for (Throwable ex : Throwables.getCausalChain(e)) {
if (ex instanceof IllegalStateException && ex.getMessage().contains("Connection pool shut down")) {
log.warn("File system connection pool is down. It will be restarted.");
registry.counter(HiveMetrics.CounterHiveFileSystemFailure.getMetricName()).increment();
try {
FileSystem.closeAll();
} catch (Exception fe) {
log.warn("Failed closing the file system.", fe);
}
Throwables.propagate(ex);
}
}
throw e;
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
return directSqlTable.exists(name);
}
/**
* getTable.
*
* @param requestContext The request context
* @param name The qualified name of the resource to get
* @return table dto
*/
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final TableInfo info = super.get(requestContext, name);
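// For common views, build the table info from the view's metadata location via the proxy.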
if (connectorContext.getConfig().isCommonViewEnabled()
&& HiveTableUtil.isCommonView(info)) {
final String tableLoc = HiveTableUtil.getCommonViewMetadataLocation(info);
return hiveConnectorFastTableServiceProxy.getCommonViewTableInfo(name, tableLoc, info,
new HiveTypeConverter(), connectorContext.getConfig().isIcebergCacheEnabled());
}
if (!connectorContext.getConfig().isIcebergEnabled() || !HiveTableUtil.isIcebergTable(info)) {
return info;
}
// Return the iceberg table with just the metadata location included.
if (connectorContext.getConfig().shouldFetchOnlyMetadataLocationEnabled()
&& requestContext.isIncludeMetadataLocationOnly()) {
return info;
}
final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
final TableInfo result = hiveConnectorFastTableServiceProxy.getIcebergTable(name, tableLoc, info,
requestContext.isIncludeMetadata(), connectorContext.getConfig().isIcebergCacheEnabled());
// Renamed tables could still be cached with the old table name.
// Set it to the qName in the request.
result.setName(name);
return result;
} catch (IllegalStateException e) {
throw handleException(e);
}
}
@Override
public Map<String, List<QualifiedName>> getTableNames(
final ConnectorRequestContext context,
final List<String> uris,
final boolean prefixSearch
) {
return directSqlTable.getTableNames(uris, prefixSearch);
}
/**
* Update a table with the given metadata.
*
* If the table is an iceberg table, lock the table for update so that no other request can update it. If the meta
* information is invalid, throw an error.
* If the table is not an iceberg table, do a regular table update.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
try {
if (HiveTableUtil.isIcebergTable(tableInfo)) {
icebergTableHandler.handleUpdate(requestContext, this.directSqlTable, tableInfo);
} else if (connectorContext.getConfig().isCommonViewEnabled()
&& HiveTableUtil.isCommonView(tableInfo)) {
final QualifiedName tableName = tableInfo.getName();
HiveTableUtil.throwIfTableMetadataNullOrEmpty(tableName, tableInfo.getMetadata());
final String tableMetadataLocation = HiveTableUtil.getCommonViewMetadataLocation(tableInfo);
commonViewHandler.handleUpdate(requestContext, this.directSqlTable,
tableInfo, tableMetadataLocation);
} else {
super.update(requestContext, tableInfo);
}
} catch (IllegalStateException e) {
throw handleException(e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
directSqlTable.delete(name);
}
}
| 1,605 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlSavePartition.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.PartitionUtil;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.Table;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Transactional;
import java.sql.Types;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to save hive partitions.
*
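* <p>Illustrative usage sketch (the bean and variable names are hypothetical):
* <pre>{@code
*   directSqlSavePartition.insert(tableQName, hiveTable, partitionInfos);
* }</pre>
*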
* @author amajumdar
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlSavePartition {
private static final String PARAM_LAST_DDL_TIME = "transient_lastDdlTime";
private final Registry registry;
private final String catalogName;
private final int batchSize;
private final JdbcTemplate jdbcTemplate;
private final SequenceGeneration sequenceGeneration;
private final HiveConnectorFastServiceMetric fastServiceMetric;
/**
* Constructor.
*
* @param connectorContext connector context
* @param jdbcTemplate JDBC template
* @param sequenceGeneration sequence generator
* @param fastServiceMetric fast service metric
*/
public DirectSqlSavePartition(final ConnectorContext connectorContext, final JdbcTemplate jdbcTemplate,
final SequenceGeneration sequenceGeneration,
final HiveConnectorFastServiceMetric fastServiceMetric) {
this.registry = connectorContext.getRegistry();
this.catalogName = connectorContext.getCatalogName();
this.batchSize = connectorContext.getConfig().getHiveMetastoreBatchSize();
this.jdbcTemplate = jdbcTemplate;
this.sequenceGeneration = sequenceGeneration;
this.fastServiceMetric = fastServiceMetric;
}
/**
* Inserts the partitions.
* Note: Column descriptor of the partitions will be set to that of the table.
*
* @param tableQName table name
* @param table hive table
* @param partitions list of partitions
*/
public void insert(final QualifiedName tableQName, final Table table, final List<PartitionInfo> partitions) {
final long start = registry.clock().wallTime();
try {
// Get the table id and column id
final TableSequenceIds tableSequenceIds = getTableSequenceIds(table.getDbName(), table.getTableName());
// Get the sequence ids and lock the records in the database
final PartitionSequenceIds partitionSequenceIds =
this.getPartitionSequenceIds(partitions.size());
final List<List<PartitionInfo>> subPartitionList = Lists.partition(partitions, batchSize);
// Use the current time for create and update time.
final long currentTimeInEpoch = Instant.now().getEpochSecond();
int index = 0;
// Insert the partitions in batches
for (List<PartitionInfo> subPartitions : subPartitionList) {
_insert(tableQName, table, tableSequenceIds, partitionSequenceIds, subPartitions, currentTimeInEpoch,
index);
index += batchSize;
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAddPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
private PartitionSequenceIds getPartitionSequenceIds(final int size) {
return new PartitionSequenceIds(sequenceGeneration.newPartitionSequenceIdByName(size,
SequenceGeneration.SEQUENCE_NAME_PARTITION),
sequenceGeneration.newPartitionSequenceIdByName(size,
SequenceGeneration.SEQUENCE_NAME_SDS),
sequenceGeneration.newPartitionSequenceIdByName(size,
SequenceGeneration.SEQUENCE_NAME_SERDES));
}
@SuppressWarnings("checkstyle:methodname")
private void _insert(final QualifiedName tableQName, final Table table, final TableSequenceIds tableSequenceIds,
final PartitionSequenceIds partitionSequenceIds, final List<PartitionInfo> partitions,
final long currentTimeInEpoch, final int index) {
final List<Object[]> serdesValues = Lists.newArrayList();
final List<Object[]> serdeParamsValues = Lists.newArrayList();
final List<Object[]> sdsValues = Lists.newArrayList();
final List<Object[]> partitionsValues = Lists.newArrayList();
final List<Object[]> partitionParamsValues = Lists.newArrayList();
final List<Object[]> partitionKeyValsValues = Lists.newArrayList();
final List<String> partitionNames = Lists.newArrayList();
int currentIndex = index;
for (PartitionInfo partition : partitions) {
final StorageInfo storageInfo = partition.getSerde();
final long partId = partitionSequenceIds.getPartId() + currentIndex;
final long sdsId = partitionSequenceIds.getSdsId() + currentIndex;
final long serdeId = partitionSequenceIds.getSerdeId() + currentIndex;
final String partitionName = partition.getName().getPartitionName();
final List<String> partValues = PartitionUtil.getPartValuesFromPartName(tableQName, table, partitionName);
final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
partitionsValues.add(new Object[]{0, tableSequenceIds.getTableId(), currentTimeInEpoch,
sdsId, escapedPartName, partId, });
for (int i = 0; i < partValues.size(); i++) {
partitionKeyValsValues.add(new Object[]{partId, partValues.get(i), i});
}
// Partition parameters
final Map<String, String> parameters = partition.getMetadata();
if (parameters != null) {
parameters
.forEach((key, value) -> partitionParamsValues.add(new Object[]{value, partId, key}));
}
partitionParamsValues.add(new Object[]{currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME});
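// Storage descriptor and serde rows are added only when storage info is present; the column descriptor id is taken from the table.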
if (storageInfo != null) {
serdesValues.add(new Object[]{null, storageInfo.getSerializationLib(), serdeId});
final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
if (serdeInfoParameters != null) {
serdeInfoParameters
.forEach((key, value) -> serdeParamsValues.add(new Object[]{value, serdeId, key}));
}
sdsValues.add(new Object[]{storageInfo.getOutputFormat(), false, tableSequenceIds.getCdId(),
false, serdeId, storageInfo.getUri(), storageInfo.getInputFormat(), 0, sdsId, });
}
partitionNames.add(partitionName);
currentIndex++;
}
try {
jdbcTemplate.batchUpdate(SQL.SERDES_INSERT, serdesValues,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT, serdeParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
jdbcTemplate.batchUpdate(SQL.SDS_INSERT, sdsValues,
new int[]{Types.VARCHAR, Types.BOOLEAN, Types.BIGINT, Types.BOOLEAN,
Types.BIGINT, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.BIGINT, });
jdbcTemplate.batchUpdate(SQL.PARTITIONS_INSERT, partitionsValues,
new int[]{Types.INTEGER, Types.BIGINT, Types.INTEGER, Types.BIGINT, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT, partitionParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
jdbcTemplate.batchUpdate(SQL.PARTITION_KEY_VALS_INSERT, partitionKeyValsValues,
new int[]{Types.BIGINT, Types.VARCHAR, Types.INTEGER});
} catch (DuplicateKeyException e) {
throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
} catch (Exception e) {
throw new ConnectorException(
String.format("Failed inserting partitions %s for table %s", partitionNames, tableQName), e);
}
}
private TableSequenceIds getTableSequenceIds(final String dbName, final String tableName) {
try {
return jdbcTemplate.queryForObject(SQL.TABLE_SELECT,
new SqlParameterValue[]{new SqlParameterValue(Types.VARCHAR, dbName),
new SqlParameterValue(Types.VARCHAR, tableName), },
(rs, rowNum) -> new TableSequenceIds(rs.getLong("tbl_id"), rs.getLong("cd_id")));
} catch (EmptyResultDataAccessException e) {
throw new TableNotFoundException(QualifiedName.ofTable(catalogName, dbName, tableName));
} catch (Exception e) {
throw new ConnectorException(String.format("Failed getting the sequence id for table %s", tableName), e);
}
}
/**
* Updates the existing partitions. This method assumes that the partitions already exist and so does not
* validate their existence.
* Note: Column descriptor of the partitions will not be updated.
*
* @param tableQName table name
* @param partitionHolders list of partitions
*/
public void update(final QualifiedName tableQName, final List<PartitionHolder> partitionHolders) {
final long start = registry.clock().wallTime();
try {
final List<List<PartitionHolder>> subPartitionDetailList = Lists.partition(partitionHolders, batchSize);
final long currentTimeInEpoch = Instant.now().getEpochSecond();
for (List<PartitionHolder> subPartitionHolders : subPartitionDetailList) {
_update(tableQName, subPartitionHolders, currentTimeInEpoch);
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAlterPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
@SuppressWarnings("checkstyle:methodname")
private void _update(final QualifiedName tableQName, final List<PartitionHolder> partitionHolders,
final long currentTimeInEpoch) {
final List<Object[]> serdesValues = Lists.newArrayList();
final List<Object[]> serdeParamsValues = Lists.newArrayList();
final List<Object[]> sdsValues = Lists.newArrayList();
final List<Object[]> partitionParamsValues = Lists.newArrayList();
final List<String> partitionNames = Lists.newArrayList();
for (PartitionHolder partitionHolder : partitionHolders) {
final PartitionInfo partition = partitionHolder.getPartitionInfo();
final StorageInfo storageInfo = partition.getSerde();
final long partId = partitionHolder.getId();
final long sdsId = partitionHolder.getSdId();
final long serdeId = partitionHolder.getSerdeId();
// Partition parameters
final Map<String, String> parameters = partition.getMetadata();
if (parameters != null) {
parameters
.forEach((key, value) -> partitionParamsValues.add(new Object[]{value, partId, key, value}));
}
partitionParamsValues.add(
new Object[]{currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME, currentTimeInEpoch});
if (storageInfo != null) {
serdesValues.add(new Object[]{null, storageInfo.getSerializationLib(), serdeId});
final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
if (serdeInfoParameters != null) {
serdeInfoParameters
.forEach((key, value) -> serdeParamsValues.add(new Object[]{value, serdeId, key, value}));
}
sdsValues.add(new Object[]{storageInfo.getOutputFormat(), false, false, storageInfo.getUri(),
storageInfo.getInputFormat(), sdsId, });
}
partitionNames.add(partition.getName().toString());
}
try {
jdbcTemplate.batchUpdate(SQL.SERDES_UPDATE, serdesValues,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT_UPDATE, serdeParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
jdbcTemplate.batchUpdate(SQL.SDS_UPDATE, sdsValues,
new int[]{Types.VARCHAR, Types.BOOLEAN, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT_UPDATE, partitionParamsValues,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
} catch (DuplicateKeyException e) {
throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
} catch (Exception e) {
throw new ConnectorException(
String.format("Failed updating partitions %s for table %s", partitionNames, tableQName), e);
}
}
/**
* Delete the partitions with the given <code>partitionNames</code>.
*
* @param tableQName table name
* @param partitionNames list of partition names
*/
public void delete(final QualifiedName tableQName, final List<String> partitionNames) {
final long start = registry.clock().wallTime();
try {
final List<List<String>> subPartitionNameList = Lists.partition(partitionNames, batchSize);
subPartitionNameList.forEach(subPartitionNames -> _delete(tableQName, subPartitionNames));
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagDropHivePartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
@SuppressWarnings("checkstyle:methodname")
private void _delete(final QualifiedName tableQName, final List<String> partitionNames) {
try {
final List<PartitionSequenceIds> partitionSequenceIds = getPartitionSequenceIds(tableQName, partitionNames);
if (partitionSequenceIds != null && !partitionSequenceIds.isEmpty()) {
_delete(partitionSequenceIds);
}
} catch (EmptyResultDataAccessException ignored) {
log.debug("None of the table {} partitions {} exist for dropping.", tableQName, partitionNames, ignored);
} catch (Exception e) {
throw new ConnectorException(
String.format("Failed dropping table %s partitions: %s", tableQName, partitionNames), e);
}
}
private List<PartitionSequenceIds> getPartitionSequenceIds(final QualifiedName tableName,
final List<String> partitionNames) {
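// Build one "?" placeholder per partition name for the IN clause, then bind the database name, table name and the names.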
final List<String> paramVariables = partitionNames.stream().map(s -> "?").collect(Collectors.toList());
final String paramVariableString = Joiner.on(",").skipNulls().join(paramVariables);
final SqlParameterValue[] values = new SqlParameterValue[partitionNames.size() + 2];
int index = 0;
values[index++] = new SqlParameterValue(Types.VARCHAR, tableName.getDatabaseName());
values[index++] = new SqlParameterValue(Types.VARCHAR, tableName.getTableName());
for (String partitionName : partitionNames) {
values[index++] = new SqlParameterValue(Types.VARCHAR, partitionName);
}
return jdbcTemplate.query(
String.format(SQL.PARTITIONS_SELECT, paramVariableString), values,
(rs, rowNum) -> new PartitionSequenceIds(rs.getLong("part_id"), rs.getLong("sd_id"),
rs.getLong("serde_id")));
}
/**
* Delete all the partitions for the given table <code>tableQName</code>.
*
* @param tableQName table name
*/
public void delete(final QualifiedName tableQName) {
final long start = registry.clock().wallTime();
try {
List<PartitionSequenceIds> partitionSequenceIds = getPartitionSequenceIds(tableQName);
while (!partitionSequenceIds.isEmpty()) {
_delete(partitionSequenceIds);
partitionSequenceIds = getPartitionSequenceIds(tableQName);
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagDropHivePartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
private List<PartitionSequenceIds> getPartitionSequenceIds(final QualifiedName tableQName) {
return jdbcTemplate.query(
String.format(SQL.PARTITIONS_SELECT_ALL, this.batchSize),
new Object[]{tableQName.getDatabaseName(), tableQName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR},
(rs, rowNum) -> new PartitionSequenceIds(rs.getLong("part_id"), rs.getLong("sd_id"),
rs.getLong("serde_id")));
}
@SuppressWarnings("checkstyle:methodname")
private void _delete(final List<PartitionSequenceIds> subPartitionIds) {
final List<String> paramVariables = subPartitionIds.stream().map(s -> "?").collect(Collectors.toList());
final SqlParameterValue[] partIds =
subPartitionIds.stream().map(p -> new SqlParameterValue(Types.BIGINT, p.getPartId()))
.toArray(SqlParameterValue[]::new);
final SqlParameterValue[] sdsIds =
subPartitionIds.stream().map(p -> new SqlParameterValue(Types.BIGINT, p.getSdsId()))
.toArray(SqlParameterValue[]::new);
final SqlParameterValue[] serdeIds =
subPartitionIds.stream().filter(p -> p.getSerdeId() != null)
.map(p -> new SqlParameterValue(Types.BIGINT, p.getSerdeId()))
.toArray(SqlParameterValue[]::new);
final String paramVariableString = Joiner.on(",").skipNulls().join(paramVariables);
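// Delete child rows (key vals, params) before PARTITIONS, then the serde and storage descriptor rows referenced by the dropped partitions.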
jdbcTemplate.update(
String.format(SQL.PARTITION_KEY_VALS_DELETES, paramVariableString), (Object[]) partIds);
jdbcTemplate.update(
String.format(SQL.PARTITION_PARAMS_DELETES, paramVariableString), (Object[]) partIds);
jdbcTemplate.update(
String.format(SQL.PARTITIONS_DELETES, paramVariableString), (Object[]) partIds);
jdbcTemplate.update(
String.format(SQL.SERDE_PARAMS_DELETES, paramVariableString), (Object[]) serdeIds);
jdbcTemplate.update(
String.format(SQL.BUCKETING_COLS_DELETES, paramVariableString), (Object[]) sdsIds);
jdbcTemplate.update(
String.format(SQL.SORT_COLS_DELETES, paramVariableString), (Object[]) sdsIds);
jdbcTemplate.update(
String.format(SQL.SDS_DELETES, paramVariableString), (Object[]) sdsIds);
jdbcTemplate.update(
String.format(SQL.SERDES_DELETES, paramVariableString), (Object[]) serdeIds);
}
/**
* Drops, updates and adds partitions for a table.
*
* @param tableQName table name
* @param table table
* @param addedPartitionInfos new partitions to be added
* @param existingPartitionHolders existing partitions to be altered/updated
* @param deletePartitionNames existing partitions to be dropped
*/
public void addUpdateDropPartitions(final QualifiedName tableQName, final Table table,
final List<PartitionInfo> addedPartitionInfos,
final List<PartitionHolder> existingPartitionHolders,
final Set<String> deletePartitionNames) {
final long start = registry.clock().wallTime();
try {
if (!deletePartitionNames.isEmpty()) {
delete(tableQName, Lists.newArrayList(deletePartitionNames));
}
if (!existingPartitionHolders.isEmpty()) {
update(tableQName, existingPartitionHolders);
}
if (!addedPartitionInfos.isEmpty()) {
insert(tableQName, table, addedPartitionInfos);
}
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAddDropPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
@VisibleForTesting
private static class SQL {
static final String SERDES_INSERT =
"INSERT INTO SERDES (NAME,SLIB,SERDE_ID) VALUES (?,?,?)";
static final String SERDES_UPDATE =
"UPDATE SERDES SET NAME=?,SLIB=? WHERE SERDE_ID=?";
static final String SERDES_DELETES =
"DELETE FROM SERDES WHERE SERDE_ID in (%s)";
static final String SERDE_PARAMS_INSERT =
"INSERT INTO SERDE_PARAMS(PARAM_VALUE,SERDE_ID,PARAM_KEY) VALUES (?,?,?)";
static final String SERDE_PARAMS_INSERT_UPDATE =
"INSERT INTO SERDE_PARAMS(PARAM_VALUE,SERDE_ID,PARAM_KEY) VALUES (?,?,?) "
+ "ON DUPLICATE KEY UPDATE PARAM_VALUE=?";
static final String SERDE_PARAMS_DELETES =
"DELETE FROM SERDE_PARAMS WHERE SERDE_ID in (%s)";
static final String SDS_INSERT =
"INSERT INTO SDS (OUTPUT_FORMAT,IS_COMPRESSED,CD_ID,IS_STOREDASSUBDIRECTORIES,SERDE_ID,LOCATION, "
+ "INPUT_FORMAT,NUM_BUCKETS,SD_ID) VALUES (?,?,?,?,?,?,?,?,?)";
static final String SDS_UPDATE =
"UPDATE SDS SET OUTPUT_FORMAT=?,IS_COMPRESSED=?,IS_STOREDASSUBDIRECTORIES=?,LOCATION=?, "
+ "INPUT_FORMAT=? WHERE SD_ID=?";
static final String BUCKETING_COLS_DELETES =
"DELETE FROM BUCKETING_COLS WHERE SD_ID in (%s)";
static final String SORT_COLS_DELETES =
"DELETE FROM SORT_COLS WHERE SD_ID in (%s)";
static final String SDS_DELETES =
"DELETE FROM SDS WHERE SD_ID in (%s)";
static final String PARTITIONS_INSERT =
"INSERT INTO PARTITIONS(LAST_ACCESS_TIME,TBL_ID,CREATE_TIME,SD_ID,PART_NAME,PART_ID) VALUES (?,?,?,?,?,?)";
static final String PARTITIONS_DELETES =
"DELETE FROM PARTITIONS WHERE PART_ID in (%s)";
static final String PARTITION_PARAMS_INSERT =
"INSERT INTO PARTITION_PARAMS (PARAM_VALUE,PART_ID,PARAM_KEY) VALUES (?,?,?)";
static final String PARTITION_PARAMS_INSERT_UPDATE =
"INSERT INTO PARTITION_PARAMS (PARAM_VALUE,PART_ID,PARAM_KEY) VALUES (?,?,?) "
+ "ON DUPLICATE KEY UPDATE PARAM_VALUE=?";
static final String PARTITION_PARAMS_DELETES =
"DELETE FROM PARTITION_PARAMS WHERE PART_ID in (%s)";
static final String PARTITION_KEY_VALS_INSERT =
"INSERT INTO PARTITION_KEY_VALS(PART_ID,PART_KEY_VAL,INTEGER_IDX) VALUES (?,?,?)";
static final String PARTITION_KEY_VALS_DELETES =
"DELETE FROM PARTITION_KEY_VALS WHERE PART_ID in (%s)";
static final String PARTITIONS_SELECT_ALL =
"SELECT P.PART_ID, P.SD_ID, S.SERDE_ID FROM DBS D JOIN TBLS T ON D.DB_ID=T.DB_ID "
+ "JOIN PARTITIONS P ON T.TBL_ID=P.TBL_ID JOIN SDS S ON P.SD_ID=S.SD_ID "
+ "WHERE D.NAME=? and T.TBL_NAME=? limit %d";
static final String PARTITIONS_SELECT =
"SELECT P.PART_ID, P.SD_ID, S.SERDE_ID FROM DBS D JOIN TBLS T ON D.DB_ID=T.DB_ID "
+ "JOIN PARTITIONS P ON T.TBL_ID=P.TBL_ID JOIN SDS S ON P.SD_ID=S.SD_ID "
+ "WHERE D.NAME=? and T.TBL_NAME=? and P.PART_NAME in (%s)";
static final String TABLE_SELECT =
"SELECT T.TBL_ID, S.CD_ID FROM DBS D JOIN TBLS T ON D.DB_ID=T.DB_ID JOIN SDS S ON T.SD_ID=S.SD_ID "
+ "WHERE D.NAME=? and T.TBL_NAME=?";
}
}
| 1,606 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/SequenceGeneration.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
/**
* This class is used to generate the sequence ids.
*
* @author amajumdar
* @since 1.1.x
*/
@Slf4j
@Transactional("hiveTxManager")
public class SequenceGeneration {
/**
* MPartition sequence name.
**/
public static final String SEQUENCE_NAME_PARTITION = "org.apache.hadoop.hive.metastore.model.MPartition";
/**
* MSerDeInfo sequence name.
**/
public static final String SEQUENCE_NAME_SERDES = "org.apache.hadoop.hive.metastore.model.MSerDeInfo";
/**
* MStorageDescriptor sequence name.
**/
public static final String SEQUENCE_NAME_SDS = "org.apache.hadoop.hive.metastore.model.MStorageDescriptor";
private final JdbcTemplate jdbcTemplate;
/**
* Constructor.
*
* @param jdbcTemplate JDBC template
*/
public SequenceGeneration(@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate jdbcTemplate) {
this.jdbcTemplate = jdbcTemplate;
}
/**
* Returns the current sequence id and increments it in the database by the given <code>size</code>.
*
* @param size number of records getting inserted
* @param sequenceParamName the sequence parameter name
* @return the current sequence id before it is incremented
*/
@Transactional(propagation = Propagation.REQUIRES_NEW)
public Long newPartitionSequenceIdByName(final int size, final String sequenceParamName) {
Long result = null;
try {
//Get current sequence number
result = jdbcTemplate.queryForObject(SQL.SEQUENCE_NEXT_VAL_BYNAME,
new Object[]{sequenceParamName}, Long.class);
} catch (EmptyResultDataAccessException e) {
log.warn("Failed getting the sequence ids for partition", e);
} catch (Exception e) {
throw new ConnectorException("Failed retrieving the sequence numbers.");
}
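// Insert the sequence row if it does not exist yet; otherwise advance it by the requested size.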
try {
if (result == null) {
result = 1L; //init to 1L in case there are no records
jdbcTemplate.update(SQL.SEQUENCE_INSERT_VAL, result + size, sequenceParamName);
} else {
jdbcTemplate.update(SQL.SEQUENCE_UPDATE_VAL, result + size, sequenceParamName);
}
return result;
} catch (Exception e) {
throw new ConnectorException("Failed updating the sequence ids for partition", e);
}
}
@VisibleForTesting
private static class SQL {
static final String SEQUENCE_INSERT_VAL =
"INSERT INTO SEQUENCE_TABLE(NEXT_VAL,SEQUENCE_NAME) VALUES (?,?)";
static final String SEQUENCE_UPDATE_VAL =
"UPDATE SEQUENCE_TABLE SET NEXT_VAL=? WHERE SEQUENCE_NAME=?";
static final String SEQUENCE_NEXT_VAL_BYNAME =
"SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME=? FOR UPDATE";
}
}
| 1,607 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlDatabase.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.jdbc.support.rowset.SqlRowSet;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nullable;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to update database metadata.
*
* @author amajumdar
* @since 1.3.0
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlDatabase {
private static final String COL_URI = "uri";
private static final String COL_OWNER = "owner";
private static final String COL_PARAM_KEY = "param_key";
private static final String COL_PARAM_VALUE = "param_value";
private final Registry registry;
private final JdbcTemplate jdbcTemplate;
private final HiveConnectorFastServiceMetric fastServiceMetric;
/**
* Constructor.
*
* @param connectorContext server context
* @param jdbcTemplate JDBC template
* @param fastServiceMetric fast service metric
*/
public DirectSqlDatabase(
final ConnectorContext connectorContext,
final JdbcTemplate jdbcTemplate,
final HiveConnectorFastServiceMetric fastServiceMetric
) {
this.registry = connectorContext.getRegistry();
this.jdbcTemplate = jdbcTemplate;
this.fastServiceMetric = fastServiceMetric;
}
/**
* Returns the database internal id.
* @param databaseName database name
* @return database id
*/
private Long getDatabaseId(final QualifiedName databaseName) {
try {
return jdbcTemplate.queryForObject(SQL.GET_DATABASE_ID,
new String[]{databaseName.getDatabaseName()},
new int[]{Types.VARCHAR}, Long.class);
} catch (EmptyResultDataAccessException e) {
log.debug("Database {} not found.", databaseName);
throw new DatabaseNotFoundException(databaseName);
}
}
/**
* Returns the database.
* @param databaseName database name
* @return database
*/
@Transactional(readOnly = true)
public DatabaseInfo getDatabase(final QualifiedName databaseName) {
final Long id = getDatabaseId(databaseName);
return getDatabaseById(id, databaseName);
}
private DatabaseInfo getDatabaseById(final Long id, final QualifiedName databaseName) {
DatabaseInfo result = null;
try {
// Retrieve databaseRowSet info record
final SqlRowSet databaseRowSet = jdbcTemplate.queryForRowSet(SQL.GET_DATABASE,
new Object[]{id}, new int[]{Types.BIGINT});
if (databaseRowSet.first()) {
final AuditInfo auditInfo =
AuditInfo.builder().createdBy(databaseRowSet.getString(COL_OWNER)).build();
//Retrieve databaseRowSet params
final Map<String, String> metadata = Maps.newHashMap();
try {
final SqlRowSet paramRowSet = jdbcTemplate.queryForRowSet(SQL.GET_DATABASE_PARAMS,
new Object[]{id}, new int[]{Types.BIGINT});
while (paramRowSet.next()) {
metadata.put(paramRowSet.getString(COL_PARAM_KEY),
paramRowSet.getString(COL_PARAM_VALUE));
}
} catch (EmptyResultDataAccessException ignored) { }
result = DatabaseInfo.builder()
.name(databaseName)
.uri(databaseRowSet.getString(COL_URI))
.auditInfo(auditInfo).metadata(metadata).build();
}
} catch (EmptyResultDataAccessException e) {
log.debug("Database {} not found.", databaseName);
throw new DatabaseNotFoundException(databaseName);
}
return result;
}
/**
* Updates the database object.
* @param databaseInfo database object
*/
public void update(final DatabaseInfo databaseInfo) {
log.debug("Start: Database update using direct sql for {}", databaseInfo.getName());
final long start = registry.clock().wallTime();
try {
final Long databaseId = getDatabaseId(databaseInfo.getName());
final DatabaseInfo existingDatabaseInfo = getDatabaseById(databaseId, databaseInfo.getName());
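// Diff the existing and new params: insert keys present only in the new metadata and update the ones that changed.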
final Map<String, String> newMetadata = databaseInfo.getMetadata() == null ? Maps.newHashMap()
: databaseInfo.getMetadata();
final MapDifference<String, String> diff = Maps.difference(existingDatabaseInfo.getMetadata(), newMetadata);
insertDatabaseParams(databaseId, diff.entriesOnlyOnRight());
final Map<String, String> updateParams = diff.entriesDiffering().entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, s -> s.getValue().rightValue()));
updateDatabaseParams(databaseId, updateParams);
final String uri =
Strings.isNullOrEmpty(databaseInfo.getUri()) ? existingDatabaseInfo.getUri() : databaseInfo.getUri();
final String newOwner = getOwner(databaseInfo.getAudit());
final String owner =
Strings.isNullOrEmpty(newOwner) ? existingDatabaseInfo.getAudit().getCreatedBy() : newOwner;
jdbcTemplate.update(SQL.UPDATE_DATABASE, new SqlParameterValue(Types.VARCHAR, uri),
new SqlParameterValue(Types.VARCHAR, owner),
new SqlParameterValue(Types.BIGINT, databaseId));
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagAlterDatabase.getMetricName(), registry.clock().wallTime() - start);
log.debug("End: Database update using direct sql for {}", databaseInfo.getName());
}
}
private String getOwner(@Nullable final AuditInfo audit) {
return audit != null ? audit.getCreatedBy() : null;
}
private void insertDatabaseParams(final Long id, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.map(s -> new Object[]{id, s.getKey(), s.getValue()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.INSERT_DATABASE_PARAMS, paramsList,
new int[]{Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
}
}
private void updateDatabaseParams(final Long id, final Map<String, String> params) {
if (!params.isEmpty()) {
final List<Object[]> paramsList = params.entrySet().stream()
.map(s -> new Object[]{s.getValue(), id, s.getKey()}).collect(Collectors.toList());
jdbcTemplate.batchUpdate(SQL.UPDATE_DATABASE_PARAMS, paramsList,
new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
}
}
@VisibleForTesting
private static class SQL {
static final String GET_DATABASE_ID =
"select d.db_id from DBS d where d.name=?";
static final String GET_DATABASE =
"select d.desc, d.name, d.db_location_uri uri, d.owner_name owner from DBS d where d.db_id=?";
static final String GET_DATABASE_PARAMS =
"select param_key, param_value from DATABASE_PARAMS where db_id=?";
static final String UPDATE_DATABASE_PARAMS =
"update DATABASE_PARAMS set param_value=? WHERE db_id=? and param_key=?";
static final String INSERT_DATABASE_PARAMS =
"insert into DATABASE_PARAMS(db_id,param_key,param_value) values (?,?,?)";
static final String UPDATE_DATABASE =
"UPDATE DBS SET db_location_uri=?, owner_name=? WHERE db_id=?";
}
}
| 1,608 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/PartitionHolder.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import lombok.Data;
import org.apache.hadoop.hive.metastore.api.Partition;
/**
* A wrapper class to hold the Partition internal ids and the partition either as PartitionInfo or Partition.
* @author amajumdar
* @since 1.1.x
*/
@Data
public class PartitionHolder {
// id of the PARTITIONS table
private Long id;
// id of the SDS table
private Long sdId;
// id of the SERDES table
private Long serdeId;
private PartitionInfo partitionInfo;
private Partition partition;
/**
* Constructor populating the ids and partitionInfo.
* @param id partition id
* @param sdId partition storage id
* @param serdeId partition serde id
* @param partitionInfo partition info
*/
public PartitionHolder(final Long id, final Long sdId, final Long serdeId, final PartitionInfo partitionInfo) {
this.id = id;
this.sdId = sdId;
this.serdeId = serdeId;
this.partitionInfo = partitionInfo;
}
/**
* Constructor populating the partition only.
* @param partition partition
*/
public PartitionHolder(final Partition partition) {
this.partition = partition;
}
}
| 1,609 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlGetPartition.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.HiveFilterPartition;
import com.netflix.metacat.connector.hive.util.PartitionFilterGenerator;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.StringReader;
import java.sql.Types;
import java.time.Instant;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* This class makes direct sql calls to get partitions.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
@Transactional("hiveTxManager")
public class DirectSqlGetPartition {
/**
* DateCreated field users can request to sort on.
*/
public static final String FIELD_DATE_CREATED = "dateCreated";
private static final String FIELD_BATCHID = "batchid";
private static final String AUDIT_DB = "audit";
private static final Pattern AUDIT_TABLENAME_PATTERN = Pattern.compile(
"(?<db>.*)__(?<table>.*)__audit(.*)$"
);
private static final String PARTITION_NAME = "name";
private static final String PARTITION_URI = "uri";
private final ThreadServiceManager threadServiceManager;
private final Registry registry;
private JdbcTemplate jdbcTemplate;
private final HiveConnectorFastServiceMetric fastServiceMetric;
private final String catalogName;
private final Config config;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param connectorContext server context
* @param threadServiceManager thread service manager
* @param jdbcTemplate JDBC template
* @param fastServiceMetric fast service metric
*/
public DirectSqlGetPartition(
final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager,
@Qualifier("hiveReadJdbcTemplate") final JdbcTemplate jdbcTemplate,
final HiveConnectorFastServiceMetric fastServiceMetric
) {
this.catalogName = connectorContext.getCatalogName();
this.threadServiceManager = threadServiceManager;
this.registry = connectorContext.getRegistry();
this.config = connectorContext.getConfig();
this.jdbcTemplate = jdbcTemplate;
this.fastServiceMetric = fastServiceMetric;
configuration = connectorContext.getConfiguration();
}
/**
* Number of partitions for the given table.
*
* @param requestContext request context
* @param tableName table name
* @return Number of partitions
*/
@Transactional(readOnly = true)
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName
) {
final long start = registry.clock().wallTime();
// Handler for reading the result set
final ResultSetExtractor<Integer> handler = rs -> {
int count = 0;
while (rs.next()) {
count = rs.getInt("count");
}
return count;
};
try {
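// Audit tables use a count query that also references their source table; other tables use the plain partition count.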
final Optional<QualifiedName> sourceTable
= getSourceTableName(tableName.getDatabaseName(), tableName.getTableName(),
false);
return sourceTable.map(
qualifiedName ->
jdbcTemplate.query(SQL.SQL_GET_AUDIT_TABLE_PARTITION_COUNT,
new String[]{
tableName.getDatabaseName(),
tableName.getTableName(),
qualifiedName.getDatabaseName(),
qualifiedName.getTableName(), },
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR}, handler))
.orElseGet(
() -> jdbcTemplate.query(SQL.SQL_GET_PARTITION_COUNT,
new String[]{
tableName.getDatabaseName(),
tableName.getTableName(), },
new int[]{Types.VARCHAR, Types.VARCHAR}, handler));
} catch (Exception e) {
throw new ConnectorException("Failed getting the partition count", e);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionCount.getMetricName(), registry.clock().wallTime() - start);
}
}
/**
* Gets the Partitions based on a filter expression for the specified table.
*
* @param requestContext The Metacat request context
* @param tableName table name
* @param partitionsRequest The metadata for what kind of partitions to get from the table
* @return filtered list of partitions
*/
@Transactional(readOnly = true)
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest
) {
final long start = registry.clock().wallTime();
try {
return this.getPartitions(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionsRequest.getPartitionNames(),
partitionsRequest.getFilter(),
partitionsRequest.getSort(),
partitionsRequest.getPageable(),
partitionsRequest.getIncludePartitionDetails(),
partitionsRequest.getIncludeAuditOnly()
).stream().map(PartitionHolder::getPartitionInfo).collect(Collectors.toList());
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
/**
* Gets the partition uris based on a filter expression for the specified table.
*
* @param requestContext The Metacat request context
* @param tableName table handle to get partition for
* @param partitionsRequest The metadata for what kind of partitions to get from the table
* @return filtered list of partition uris
*/
@Transactional(readOnly = true)
public List<String> getPartitionUris(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest) {
final long start = registry.clock().wallTime();
final List<String> result;
final List<String> partitionNames = partitionsRequest.getPartitionNames();
final Sort sort = partitionsRequest.getSort();
final Pageable pageable = partitionsRequest.getPageable();
final String filterExpression = partitionsRequest.getFilter();
if (filterExpression != null) {
return filterPartitionsColumn(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionNames,
PARTITION_URI,
filterExpression,
sort,
pageable,
partitionsRequest.getIncludeAuditOnly());
} else {
final ResultSetExtractor<List<String>> handler = rs -> {
final List<String> uris = Lists.newArrayList();
while (rs.next()) {
uris.add(rs.getString(PARTITION_URI));
}
return uris;
};
result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
null, partitionNames, SQL.SQL_GET_PARTITIONS_URI, handler, sort, pageable,
partitionsRequest.getIncludeAuditOnly());
}
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
return result;
}
/**
* Query partitions using filters on the name or uri column.
*/
private List<String> filterPartitionsColumn(
final String databaseName,
final String tableName,
final List<String> partitionNames,
final String columnName,
final String filterExpression,
final Sort sort,
final Pageable pageable,
final boolean forceDisableAudit) {
final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition()
: new FilterPartition();
// batch exists
final boolean isBatched =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
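// The handler re-evaluates the filter expression against each returned row before collecting the column value.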
ResultSetExtractor<List<String>> handler = rs -> {
final List<String> columns = Lists.newArrayList();
while (rs.next()) {
final String name = rs.getString(PARTITION_NAME);
final String uri = rs.getString(PARTITION_URI);
final long createdDate = rs.getLong(FIELD_DATE_CREATED);
Map<String, String> values = null;
if (hasDateCreated) {
values = Maps.newHashMap();
values.put(FIELD_DATE_CREATED, createdDate + "");
}
if (Strings.isNullOrEmpty(filterExpression)
|| filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
columns.add(rs.getString(columnName));
}
}
return columns;
};
return getHandlerResults(databaseName,
tableName, filterExpression, partitionNames,
SQL.SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable, forceDisableAudit);
}
/**
* Gets the partition names/keys based on a filter expression for the specified table.
*
* @param requestContext The Metacat request context
* @param tableName table handle to get partition for
* @param partitionsRequest The metadata for what kind of partitions to get from the table
* @return filtered list of partition names
*/
@Transactional(readOnly = true)
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest) {
final long start = registry.clock().wallTime();
final List<String> result;
final List<String> partitionNames = partitionsRequest.getPartitionNames();
final Sort sort = partitionsRequest.getSort();
final Pageable pageable = partitionsRequest.getPageable();
final String filterExpression = partitionsRequest.getFilter();
if (filterExpression != null) {
return filterPartitionsColumn(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionNames,
PARTITION_NAME,
filterExpression,
sort,
pageable,
partitionsRequest.getIncludeAuditOnly());
} else {
final ResultSetExtractor<List<String>> handler = rs -> {
final List<String> names = Lists.newArrayList();
while (rs.next()) {
names.add(rs.getString("name"));
}
return names;
};
result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
null, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY,
handler, sort, pageable, partitionsRequest.getIncludeAuditOnly());
}
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
return result;
}
/**
* getPartitionNames.
*
* @param context request context
* @param uris uris
* @param prefixSearch prefixSearch
* @return partition names
*/
@Transactional(readOnly = true)
public Map<String, List<QualifiedName>> getPartitionNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
final long start = registry.clock().wallTime();
final Map<String, List<QualifiedName>> result = Maps.newHashMap();
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(SQL.SQL_GET_PARTITION_NAMES_BY_URI);
final List<SqlParameterValue> params = Lists.newArrayList();
if (prefixSearch) {
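// Seed the WHERE clause with a false predicate so each uri can be appended as an OR LIKE condition.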
queryBuilder.append(" 1=2");
uris.forEach(uri -> {
queryBuilder.append(" or location like ?");
params.add(new SqlParameterValue(Types.VARCHAR, uri + "%"));
});
} else {
queryBuilder.append(" location in (");
Joiner.on(',').appendTo(queryBuilder, uris.stream().map(uri -> "?").collect(Collectors.toList()));
queryBuilder.append(")");
params.addAll(uris.stream()
.map(uri -> new SqlParameterValue(Types.VARCHAR, uri)).collect(Collectors.toList()));
}
final ResultSetExtractor<Map<String, List<QualifiedName>>> handler = rs -> {
while (rs.next()) {
final String schemaName = rs.getString("schema_name");
final String tableName = rs.getString("table_name");
final String partitionName = rs.getString("partition_name");
final String uri = rs.getString("location");
final List<QualifiedName> partitionNames = result.get(uri);
final QualifiedName qualifiedName =
QualifiedName.ofPartition(catalogName, schemaName, tableName, partitionName);
if (partitionNames == null) {
result.put(uri, Lists.newArrayList(qualifiedName));
} else {
partitionNames.add(qualifiedName);
}
}
return result;
};
try {
jdbcTemplate.query(queryBuilder.toString(), params.toArray(), handler);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionNames.getMetricName(), registry.clock().wallTime() - start);
}
return result;
}
@Transactional(readOnly = true)
protected Map<String, PartitionHolder> getPartitionHoldersByNames(final Table table,
final List<String> partitionNames,
final boolean forceDisableAudit) {
        // Internal call to get partitions; the forceDisableAudit flag is passed through from the caller.
return this.getPartitions(
table.getDbName(),
table.getTableName(),
partitionNames,
null,
null,
null,
false,
forceDisableAudit
).stream().collect(Collectors.toMap(
p -> p.getPartitionInfo().getName().getPartitionName(),
p -> p)
);
}
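    // Reads the partitions (with storage and serde info) matching the given names and filter expression and,
    // when includePartitionDetails is true, loads the partition, storage and serde parameters in parallel.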
private List<PartitionHolder> getPartitions(
final String databaseName,
final String tableName,
@Nullable final List<String> partitionIds,
@Nullable final String filterExpression,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean includePartitionDetails,
final boolean forceDisableAudit
) {
final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition()
: new FilterPartition();
        // true if the filter expression references the batchid field
final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
// Handler for reading the result set
final ResultSetExtractor<List<PartitionHolder>> handler = rs -> {
final List<PartitionHolder> result = Lists.newArrayList();
final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
int noOfRows = 0;
while (rs.next()) {
noOfRows++;
final String name = rs.getString("name");
final String uri = rs.getString("uri");
final long createdDate = rs.getLong(FIELD_DATE_CREATED);
Map<String, String> values = null;
if (hasDateCreated) {
values = Maps.newHashMap();
values.put(FIELD_DATE_CREATED, createdDate + "");
}
if (Strings.isNullOrEmpty(filterExpression)
|| filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
final Long id = rs.getLong("id");
final Long sdId = rs.getLong("sd_id");
final Long serdeId = rs.getLong("serde_id");
final String inputFormat = rs.getString("input_format");
final String outputFormat = rs.getString("output_format");
final String serializationLib = rs.getString("slib");
final StorageInfo storageInfo = new StorageInfo();
storageInfo.setUri(uri);
storageInfo.setInputFormat(inputFormat);
storageInfo.setOutputFormat(outputFormat);
storageInfo.setSerializationLib(serializationLib);
final AuditInfo auditInfo = new AuditInfo();
auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
result.add(new PartitionHolder(id, sdId, serdeId,
PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName,
databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
}
// Fail if the number of partitions exceeds the threshold limit.
if (result.size() > config.getMaxPartitionsThreshold()) {
registry.counter(registry.createId(HiveMetrics.CounterHiveGetPartitionsExceedThresholdFailure
.getMetricName()).withTags(tableQName.parts())).increment();
final String message =
String.format("Number of partitions queried for table %s exceeded the threshold %d",
tableQName, config.getMaxPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
}
}
registry.gauge(registry.createId(HiveMetrics.GaugePreExpressionFilterGetPartitionsCount
.getMetricName()).withTags(tableQName.parts())).set(noOfRows);
return result;
};
final List<PartitionHolder> partitions = this.getHandlerResults(
databaseName,
tableName,
filterExpression,
partitionIds,
SQL.SQL_GET_PARTITIONS,
handler,
sort,
pageable,
forceDisableAudit
);
if (includePartitionDetails && !partitions.isEmpty()) {
final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
for (PartitionHolder partitionHolder : partitions) {
partIds.add(partitionHolder.getId());
sdIds.add(partitionHolder.getSdId());
serdeIds.add(partitionHolder.getSerdeId());
}
final List<ListenableFuture<Void>> futures = Lists.newArrayList();
final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
futures.add(threadServiceManager.getExecutor().submit(() ->
populateParameters(partIds, SQL.SQL_GET_PARTITION_PARAMS,
"part_id", partitionParams)));
final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
if (!sdIds.isEmpty()) {
futures.add(threadServiceManager.getExecutor().submit(() ->
populateParameters(sdIds, SQL.SQL_GET_SD_PARAMS,
"sd_id", sdParams)));
}
final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
if (!serdeIds.isEmpty()) {
futures.add(threadServiceManager.getExecutor().submit(() ->
populateParameters(serdeIds, SQL.SQL_GET_SERDE_PARAMS,
"serde_id", serdeParams)));
}
ListenableFuture<List<Void>> future = null;
try {
future = Futures.allAsList(futures);
final int getPartitionsDetailsTimeout = Integer.parseInt(configuration
.getOrDefault(HiveConfigConstants.GET_PARTITION_DETAILS_TIMEOUT, "120"));
future.get(getPartitionsDetailsTimeout, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
try {
if (future != null) {
future.cancel(true);
}
} catch (Exception ignored) {
log.warn("Failed cancelling the task that gets the partition details.");
}
Throwables.propagate(e);
}
for (PartitionHolder partitionHolder : partitions) {
partitionHolder.getPartitionInfo().setMetadata(partitionParams.get(partitionHolder.getId()));
partitionHolder.getPartitionInfo().getSerde()
.setParameters(sdParams.get(partitionHolder.getSdId()));
partitionHolder.getPartitionInfo().getSerde()
.setSerdeInfoParameters(serdeParams.get(partitionHolder.getSerdeId()));
}
}
return partitions;
}
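    // Translates the filter expression into optimized SQL via PartitionFilterGenerator; if parsing or translation
    // fails, falls back to a PART_NAME LIKE based filter (see prepareFilterSql) and records a failure metric.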
private <T> List<T> getHandlerResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor<List<T>> resultSetExtractor,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
List<T> partitions;
final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
try {
if (!Strings.isNullOrEmpty(filterExpression)) {
final PartitionFilterGenerator generator =
new PartitionFilterGenerator(getPartitionKeys(databaseName, tableName, forceDisableAudit),
config.escapePartitionNameOnFilter());
String filterSql = (String) new PartitionParser(new StringReader(filterExpression)).filter()
.jjtAccept(generator, null);
if (generator.isOptimized()) {
filterSql = generator.getOptimizedSql();
}
if (filterSql != null && !filterSql.isEmpty()) {
filterSql = " and (" + filterSql + ")";
}
partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
sql, resultSetExtractor,
generator.joinSql(), filterSql,
generator.getParams(), sort, pageable, forceDisableAudit);
} else {
partitions = getHandlerResults(databaseName, tableName, null, partitionIds,
sql, resultSetExtractor,
null, null,
null, sort, pageable, forceDisableAudit);
}
} catch (Exception e) {
log.warn("Experiment: Get partitions for for table {} filter {}"
+ " failed with error {}", tableQName.toString(), filterExpression,
e.getMessage());
registry.counter(registry
.createId(HiveMetrics.CounterHiveExperimentGetTablePartitionsFailure.getMetricName())
.withTags(tableQName.parts())).increment();
partitions = getHandlerResults(databaseName, tableName,
filterExpression, partitionIds, sql, resultSetExtractor, null,
prepareFilterSql(filterExpression), Lists.newArrayList(), sort, pageable, forceDisableAudit);
}
return partitions;
}
private List<FieldSchema> getPartitionKeys(final String databaseName,
final String tableName,
final boolean forceDisableAudit) {
final List<FieldSchema> result = Lists.newArrayList();
final ResultSetExtractor<List<FieldSchema>> handler = rs -> {
while (rs.next()) {
final String name = rs.getString("pkey_name");
final String type = rs.getString("pkey_type");
result.add(new FieldSchema(name, type, null));
}
return result;
};
final Optional<QualifiedName> sourceTable = getSourceTableName(databaseName, tableName, forceDisableAudit);
return sourceTable.map(qualifiedName -> jdbcTemplate
.query(SQL.SQL_GET_AUDIT_TABLE_PARTITION_KEYS,
new Object[]{databaseName, tableName, qualifiedName.getDatabaseName(), qualifiedName.getTableName()},
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR},
handler))
.orElseGet(() -> jdbcTemplate
.query(SQL.SQL_GET_PARTITION_KEYS,
new Object[]{databaseName, tableName},
new int[]{Types.VARCHAR, Types.VARCHAR},
handler));
}
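    // Extracts the dateCreated predicates from the filter expression and rewrites them as SQL criteria on
    // p.CREATE_TIME.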
private String getDateCreatedSqlCriteria(final String filterExpression) {
final StringBuilder result = new StringBuilder();
Collection<String> values = Lists.newArrayList();
if (!Strings.isNullOrEmpty(filterExpression)) {
try {
values = (Collection<String>) new PartitionParser(
new StringReader(filterExpression)).filter().jjtAccept(new PartitionParamParserEval(),
null
);
} catch (Throwable ignored) {
//
}
}
for (String value : values) {
if (result.length() != 0) {
result.append(" and ");
}
result.append(value.replace("dateCreated", "p.CREATE_TIME"));
}
return result.toString();
}
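    // Loads the key/value parameters for the given ids, batching the lookups in groups of 5000 ids to keep the
    // generated IN clause within the maximum allowed query length.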
private Void populateParameters(final List<Long> ids,
final String sql,
final String idName,
final Map<Long, Map<String, String>> params) {
if (ids.size() > 5000) {
final List<List<Long>> subFilterPartitionNamesList = Lists.partition(ids, 5000);
subFilterPartitionNamesList.forEach(subPartitions ->
params.putAll(this.getParameters(subPartitions, sql, idName)));
} else {
params.putAll(this.getParameters(ids, sql, idName));
}
return null;
}
private Map<Long, Map<String, String>> getParameters(final List<Long> ids, final String sql, final String idName) {
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(sql);
if (!ids.isEmpty()) {
queryBuilder.append(" and ").append(idName)
.append(" in ('").append(Joiner.on("','").skipNulls().join(ids)).append("')");
}
final ResultSetExtractor<Map<Long, Map<String, String>>> handler = rs -> {
final Map<Long, Map<String, String>> result = Maps.newHashMap();
while (rs.next()) {
final Long id = rs.getLong(idName);
final String key = rs.getString("param_key");
final String value = rs.getString("param_value");
final Map<String, String> parameters = result.computeIfAbsent(id, k -> Maps.newHashMap());
parameters.put(key, value);
}
return result;
};
return jdbcTemplate.query(queryBuilder.toString(), handler);
}
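    // Parses the filter expression into individual partition key expressions (e.g. key=value), excluding the
    // batchid and dateCreated pseudo fields which are handled separately.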
private Collection<String> getSinglePartitionExprs(@Nullable final String filterExpression) {
Collection<String> result = Lists.newArrayList();
if (!Strings.isNullOrEmpty(filterExpression)) {
try {
result = (Collection<String>) new PartitionParser(
new StringReader(filterExpression)).filter().jjtAccept(config.escapePartitionNameOnFilter()
? new HivePartitionKeyParserEval() : new PartitionKeyParserEval(),
null
);
} catch (Throwable ignored) {
//
}
}
if (result != null) {
result = result.stream().filter(s -> !(s.startsWith("batchid=") || s.startsWith("dateCreated="))).collect(
Collectors.toList());
}
return result;
}
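    // Fallback filter used when optimized filter SQL generation fails: builds a PART_NAME LIKE clause for each
    // partition key expression plus an optional CREATE_TIME criteria for dateCreated.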
private String prepareFilterSql(@Nullable final String filterExpression) {
final StringBuilder result = new StringBuilder();
// Support for dateCreated
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
String dateCreatedSqlCriteria = null;
if (hasDateCreated) {
dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
}
final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
for (String singlePartitionExpr : singlePartitionExprs) {
result.append(" and p.PART_NAME like '%").append(singlePartitionExpr).append("%'");
}
if (!Strings.isNullOrEmpty(dateCreatedSqlCriteria)) {
result.append(" and ").append(dateCreatedSqlCriteria);
}
return result.toString();
}
private <T> List<T> getHandlerResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
//
// Limiting the in clause to 5000 part names because the sql query with the IN clause for part_name(767 bytes)
// will hit the max sql query length(max_allowed_packet for our RDS) if we use more than 5400 or so
//
List<T> partitions = Lists.newArrayList();
if (partitionIds != null && partitionIds.size() > 5000) {
final List<List<String>> subFilterPartitionNamesList = Lists.partition(partitionIds, 5000);
final List<T> finalPartitions = partitions;
subFilterPartitionNamesList.forEach(
subPartitionIds -> finalPartitions.addAll(
this.getSubHandlerResultsFromQuery(
databaseName,
tableName,
filterExpression,
subPartitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable,
forceDisableAudit
)
)
);
} else {
partitions = this.getSubHandlerResultsFromQuery(
databaseName,
tableName,
filterExpression,
partitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable,
forceDisableAudit
);
}
return partitions;
}
private <T> List<T> getSubHandlerResultsFromQuery(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
if (getSourceTableName(databaseName, tableName, forceDisableAudit).isPresent()) {
return this.getSubHandlerAuditTableResults(
databaseName,
tableName,
filterExpression,
partitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable,
forceDisableAudit
);
} else {
return this.getSubHandlerResults(
databaseName,
tableName,
filterExpression,
partitionIds,
sql,
resultSetExtractor,
joinSql,
filterSql,
filterParams,
sort,
pageable
);
}
}
private <T> List<T> getSubHandlerResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
// Create the sql
final StringBuilder queryBuilder = getBasicPartitionQuery(partitionIds, sql, joinSql, filterSql);
addSortPageableFilter(queryBuilder, filterExpression, sort, pageable);
List<T> partitions;
final ImmutableList.Builder<Object> paramsBuilder = ImmutableList.builder().add(databaseName, tableName);
if (partitionIds != null && !partitionIds.isEmpty()) {
paramsBuilder.addAll(partitionIds);
}
if (filterSql != null && filterParams != null) {
paramsBuilder.addAll(filterParams);
}
final List<Object> params = paramsBuilder.build();
final Object[] oParams = new Object[params.size()];
partitions = (List) jdbcTemplate.query(
queryBuilder.toString(), params.toArray(oParams), resultSetExtractor);
if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
partitions = processPageable(partitions, pageable);
}
return partitions;
}
    /**
     * Checks whether the table is an audit table, i.e. the database is the audit database and the table name
     * matches the WAP audit table pattern. If it is, returns the qualified name of the source table.
     *
     * @param databaseName      database name
     * @param tableName         table name
     * @param forceDisableAudit if true, audit table processing is skipped
     * @return the qualified name of the source table if this is an audit table, otherwise Optional.empty()
     */
private Optional<QualifiedName> getSourceTableName(final String databaseName,
final String tableName,
final boolean forceDisableAudit) {
Optional<QualifiedName> sourceTable = Optional.empty();
final boolean isAuditProcessingEnabled = Boolean.valueOf(configuration
.getOrDefault(HiveConfigConstants.ENABLE_AUDIT_PROCESSING, "true"));
if (!forceDisableAudit && isAuditProcessingEnabled && databaseName.equals(AUDIT_DB)) {
final Matcher matcher = AUDIT_TABLENAME_PATTERN.matcher(tableName);
if (matcher.matches()) {
final String sourceDatabaseName = matcher.group("db");
final String sourceTableName = matcher.group("table");
sourceTable = Optional.of(QualifiedName.ofTable(this.catalogName, sourceDatabaseName, sourceTableName));
}
}
return sourceTable;
}
/**
     * Processes a partition query for an audit table by unioning its partitions with the non-overlapping
     * partitions of the source table.
*
* @param databaseName database name
* @param tableName table name
* @param filterExpression filter
* @param partitionIds partition ids
* @param sql query sql
* @param resultSetExtractor result extractor
* @param joinSql join sql
* @param filterSql filter sql
* @param filterParams filter parameters
* @param sort sort object
* @param pageable pageable object
* @param <T> query object
* @return query results
*/
private <T> List<T> getSubHandlerAuditTableResults(
final String databaseName,
final String tableName,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
final String sql,
final ResultSetExtractor resultSetExtractor,
@Nullable final String joinSql,
@Nullable final String filterSql,
@Nullable final List<Object> filterParams,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
final boolean forceDisableAudit
) {
final Optional<QualifiedName> sourceTableName = getSourceTableName(databaseName, tableName, forceDisableAudit);
List<T> partitions = Lists.newArrayList();
if (sourceTableName.isPresent()) {
final StringBuilder auditTableQueryBuilder = getBasicPartitionQuery(partitionIds, sql, joinSql, filterSql);
final StringBuilder sourceTableQueryBuilder = getBasicPartitionQuery(partitionIds, sql, joinSql, filterSql)
.append(SQL.SQL_NOT_IN_AUTDI_TABLE_PARTITIONS);
            // Union the two queries; UNION ALL is used as an optimization since the source table query above
            // already filters out the partitions that overlap with the audit table.
auditTableQueryBuilder.append(" UNION ALL ").append(sourceTableQueryBuilder);
addSortPageableFilter(auditTableQueryBuilder, filterExpression, sort, pageable);
// Params
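            // The UNION ALL query repeats the basic partition query, so the bind parameters are added twice:
            // first for the audit table half, then for the source table half followed by the audit database/table
            // referenced by the NOT IN sub-select.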
final ImmutableList.Builder<Object> paramsBuilder = ImmutableList.builder().add(databaseName, tableName);
if (partitionIds != null && !partitionIds.isEmpty()) {
paramsBuilder.addAll(partitionIds);
}
if (filterSql != null && filterParams != null) {
paramsBuilder.addAll(filterParams);
}
paramsBuilder.add(sourceTableName.get().getDatabaseName(), sourceTableName.get().getTableName());
if (partitionIds != null && !partitionIds.isEmpty()) {
paramsBuilder.addAll(partitionIds);
}
if (filterSql != null && filterParams != null) {
paramsBuilder.addAll(filterParams);
}
paramsBuilder.add(databaseName, tableName);
final List<Object> params = paramsBuilder.build();
final Object[] oParams = new Object[params.size()];
partitions = (List) jdbcTemplate.query(
auditTableQueryBuilder.toString(), params.toArray(oParams), resultSetExtractor);
if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
partitions = processPageable(partitions, pageable);
}
}
return partitions;
}
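    // Builds the base partition query: appends the optional join SQL, the database/table predicates, the optional
    // filter SQL and an IN clause for the requested partition names.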
private StringBuilder getBasicPartitionQuery(
@Nullable final List<String> partitionIds,
final String sql,
@Nullable final String joinSql,
@Nullable final String filterSql
) {
final StringBuilder tableQueryBuilder = new StringBuilder(sql);
if (joinSql != null) {
tableQueryBuilder.append(joinSql);
}
tableQueryBuilder.append(" where d.NAME = ? and t.TBL_NAME = ?");
if (filterSql != null) {
tableQueryBuilder.append(filterSql);
}
if (partitionIds != null && !partitionIds.isEmpty()) {
final List<String> paramVariables = partitionIds.stream().map(s -> "?").collect(Collectors.toList());
tableQueryBuilder.append(" and p.PART_NAME in (")
.append(Joiner.on(",").skipNulls().join(paramVariables)).append(")");
}
return tableQueryBuilder;
}
    // Adds the sort and limit (pagination) clauses to the sql query.
private void addSortPageableFilter(
final StringBuilder queryBuilder,
@Nullable final String filterExpression,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
if (sort != null && sort.hasSort()) {
queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
}
if (pageable != null && pageable.isPageable() && Strings.isNullOrEmpty(filterExpression)) {
if (sort == null || !sort.hasSort()) {
queryBuilder.append(" order by id");
                // The default sort column must be id, which both the audit table and regular table queries expose
                // for pagination.
}
queryBuilder.append(" limit ").append(pageable.getOffset()).append(',').append(pageable.getLimit());
}
}
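    // Applies the offset/limit in memory: returns the sublist [offset, min(offset + limit, size)), or an empty
    // list when the offset is beyond the available results.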
private <T> List<T> processPageable(final List<T> partitions,
final Pageable pageable) {
int limit = pageable.getOffset() + pageable.getLimit();
if (partitions.size() < limit) {
limit = partitions.size();
}
if (pageable.getOffset() > limit) {
return Lists.newArrayList();
} else {
return partitions.subList(pageable.getOffset(), limit);
}
}
@VisibleForTesting
private static class SQL {
static final String SQL_GET_PARTITIONS_WITH_KEY_URI =
//Add p.part_id as id to allow pagination using 'order by id'
"select p.part_id as id, p.PART_NAME as name, p.CREATE_TIME as dateCreated, sds.location uri"
+ " from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID "
+ "join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID";
static final String SQL_GET_PARTITIONS_URI =
"select p.part_id as id, sds.location uri"
+ " from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID "
+ "join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID";
static final String SQL_GET_PARTITIONS_WITH_KEY =
"select p.part_id as id, p.PART_NAME as name from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d on t.DB_ID = d.DB_ID";
static final String SQL_GET_PARTITIONS =
"select p.part_id as id, p.PART_NAME as name, p.CREATE_TIME as dateCreated,"
+ " sds.location uri, sds.input_format, sds.output_format,"
+ " sds.sd_id, s.serde_id, s.slib from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d"
+ " on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID"
+ " join SERDES s on sds.SERDE_ID=s.SERDE_ID";
static final String SQL_GET_PARTITION_NAMES_BY_URI =
"select p.part_name partition_name,t.tbl_name table_name,d.name schema_name,"
+ " sds.location from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID"
+ " join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID where";
static final String SQL_GET_PARTITION_PARAMS =
"select part_id, param_key, param_value from PARTITION_PARAMS where 1=1";
static final String SQL_GET_SD_PARAMS =
"select sd_id, param_key, param_value from SD_PARAMS where 1=1";
static final String SQL_GET_SERDE_PARAMS =
"select serde_id, param_key, param_value from SERDE_PARAMS where 1=1";
static final String SQL_GET_PARTITION_KEYS =
"select pkey_name, pkey_type from PARTITION_KEYS as p "
+ "join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d"
+ " on t.DB_ID = d.DB_ID where d.name=? and t.tbl_name=? order by integer_idx";
static final String SQL_GET_PARTITION_COUNT =
"select count(*) count from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d on t.DB_ID = d.DB_ID"
+ " where d.NAME = ? and t.TBL_NAME = ?";
        // Audit table partitions take precedence in case they overlap with the source table partitions.
static final String SQL_GET_AUDIT_TABLE_PARTITION_COUNT =
"select count(distinct p1.part_name) count from PARTITIONS as p1 "
+ "join TBLS as t1 on t1.TBL_ID = p1.TBL_ID join DBS as d1 on t1.DB_ID = d1.DB_ID "
+ "where ( d1.NAME = ? and t1.TBL_NAME = ? ) "
+ "or ( d1.NAME = ? and t1.TBL_NAME = ?)";
        // Uses a nested order by (see https://stackoverflow.com/questions/6965333/mysql-union-distinct).
static final String SQL_GET_AUDIT_TABLE_PARTITION_KEYS =
"select pkey_name, pkey_type from ("
+ "(select pkey_name, pkey_type, integer_idx from PARTITION_KEYS as p1 "
+ "join TBLS as t1 on t1.TBL_ID = p1.TBL_ID join DBS as d1 "
+ "on t1.DB_ID = d1.DB_ID where d1.NAME = ? and t1.TBL_NAME = ? "
+ ") UNION "
+ "(select pkey_name, pkey_type, integer_idx from PARTITION_KEYS as p2 "
+ "join TBLS as t2 on t2.TBL_ID = p2.TBL_ID join DBS as d2 "
+ "on t2.DB_ID = d2.DB_ID where d2.NAME = ? and t2.TBL_NAME = ?)) as pp order by integer_idx";
//select the partitions not in audit table
static final String SQL_NOT_IN_AUTDI_TABLE_PARTITIONS =
" and p.PART_NAME not in ("
+ " select p1.PART_NAME from PARTITIONS as p1"
+ " join TBLS as t1 on t1.TBL_ID = p1.TBL_ID join DBS as d1"
+ " on t1.DB_ID = d1.DB_ID where d1.NAME = ? and t1.TBL_NAME = ? )"; //audit table
}
}
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/PartitionSequenceIds.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
/**
* Class representing the ids for a partition.
*
* @author amajumdar
* @since 1.1.x
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class PartitionSequenceIds {
// id of the PARTITIONS table
private Long partId;
// id of the SDS table
private Long sdsId;
// id of the SERDES table
private Long serdeId;
}
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastTableServiceProxy.java
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.commonview.CommonViewHandler;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableWrapper;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.Cacheable;
/**
* Proxy class to get the metadata info from cache if exists.
*/
@CacheConfig(cacheNames = "metacat")
public class HiveConnectorFastTableServiceProxy {
private final IcebergTableHandler icebergTableHandler;
private final HiveConnectorInfoConverter hiveMetacatConverters;
private final CommonViewHandler commonViewHandler;
/**
* Constructor.
*
* @param hiveMetacatConverters hive converter
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
*/
public HiveConnectorFastTableServiceProxy(
final HiveConnectorInfoConverter hiveMetacatConverters,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler
) {
this.hiveMetacatConverters = hiveMetacatConverters;
this.icebergTableHandler = icebergTableHandler;
this.commonViewHandler = commonViewHandler;
}
/**
* Return the table metadata from cache if exists. If not exists, make the iceberg call to refresh it.
* @param tableName table name
* @param tableMetadataLocation table metadata location
* @param info table info stored in hive metastore
* @param includeInfoDetails if true, will include more details like the manifest file content
* @param useCache true, if table can be retrieved from cache
* @return TableInfo
*/
@Cacheable(key = "'iceberg.table.' + #includeInfoDetails + '.' + #tableMetadataLocation", condition = "#useCache")
public TableInfo getIcebergTable(final QualifiedName tableName,
final String tableMetadataLocation,
final TableInfo info,
final boolean includeInfoDetails,
final boolean useCache) {
final IcebergTableWrapper icebergTable =
this.icebergTableHandler.getIcebergTable(tableName, tableMetadataLocation, includeInfoDetails);
return this.hiveMetacatConverters.fromIcebergTableToTableInfo(tableName,
icebergTable, tableMetadataLocation, info);
}
/**
* Return the common view metadata from cache if exists. If not exists, make the common view handler call
* to refresh it.
* @param name common view name
* @param tableMetadataLocation common view metadata location
* @param info common view info stored in hive metastore
* @param hiveTypeConverter hive type converter
* @param useCache true, if table can be retrieved from cache
* @return TableInfo
*/
@Cacheable(key = "'iceberg.view.' + #tableMetadataLocation", condition = "#useCache")
public TableInfo getCommonViewTableInfo(final QualifiedName name,
final String tableMetadataLocation,
final TableInfo info,
final HiveTypeConverter hiveTypeConverter,
final boolean useCache) {
return commonViewHandler.getCommonViewTableInfo(name, tableMetadataLocation, info, hiveTypeConverter);
}
}
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/TableSequenceIds.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.sql;
import lombok.Getter;
import javax.annotation.Nullable;
/**
* Class representing the ids for a table.
*
* @author amajumdar
*/
@Getter
public class TableSequenceIds {
private final Long tableId;
private final Long cdId;
private final Long sdsId;
private final Long serdeId;
/**
* Constructor.
* @param tableId table id
* @param cdId column id
*/
public TableSequenceIds(final Long tableId,
final Long cdId) {
this(tableId, cdId, null, null);
}
/**
* Constructor.
* @param tableId table id
* @param cdId column id
* @param sdsId sds id
* @param serdeId serde id
*/
public TableSequenceIds(final Long tableId,
final Long cdId,
@Nullable final Long sdsId,
@Nullable final Long serdeId) {
this.tableId = tableId;
this.cdId = cdId;
this.sdsId = sdsId;
this.serdeId = serdeId;
}
}
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/package-info.java
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Connector implementation using direct sql calls.
*
* @author amajumdar
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.sql;
import javax.annotation.ParametersAreNonnullByDefault;
Create_ds/metacat/metacat-connector-jdbc/src/test/java/com/netflix/metacat/connector/jdbc/services/package-info.java
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Tests for services.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.jdbc.services;
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/JdbcExceptionMapper.java
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.SQLException;
/**
* An interface to map JDBC SQLExceptions to Metacat Connector Exceptions.
*
* @author tgianos
* @author zhenl
* @see ConnectorException
* @see SQLException
* @since 1.0.0
*/
public interface JdbcExceptionMapper {
/**
* Convert JDBC exception to MetacatException.
*
* @param se The sql exception to map
* @param name The qualified name of the resource that was attempting to be accessed when the exception occurred
* @return A best attempt at a corresponding connector exception or generic with the SQLException as the cause
*/
default ConnectorException toConnectorException(
@NonNull @Nonnull final SQLException se,
@Nonnull @NonNull final QualifiedName name
) {
return new ConnectorException(se.getMessage(), se);
}
}
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/JdbcTypeConverter.java
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import com.netflix.metacat.common.type.VarcharType;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Type converter utilities for JDBC connectors.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public abstract class JdbcTypeConverter implements ConnectorTypeConverter {
private static final Pattern TYPE_PATTERN = Pattern.compile(
"^\\s*?"
+ "(\\w+(?:\\s(?:precision|varying))?)" // group 0
+ "\\s*?"
+ "(?:\\(\\s*?(\\d+)(?:\\s*?,\\s*?(\\d+))?\\s*?\\))?" // group 1 and 2
+ "\\s*?"
+ "(\\[\\](?:\\[\\])?)?" // group 3
+ "(?:\\s*?(\\w+(?:\\s\\w+)*))?$" // group 4
);
protected String[] splitType(final String type) {
final Matcher matcher = TYPE_PATTERN.matcher(type);
final int numGroups = matcher.groupCount();
if (matcher.find()) {
final String[] split = new String[numGroups];
for (int i = 0; i < numGroups; i++) {
split[i] = matcher.group(i + 1);
}
return split;
} else {
throw new IllegalArgumentException("Unable to parse " + type);
}
}
protected Type toMetacatBitType(@Nonnull final String[] bit) {
        // No size parameter or a size of one bit maps to boolean
if (bit[1] == null || Integer.parseInt(bit[1]) == 1) {
return BaseType.BOOLEAN;
} else {
final int bytes = (int) Math.ceil(Double.parseDouble(bit[1]) / 8.0);
return VarbinaryType.createVarbinaryType(bytes);
}
}
protected DecimalType toMetacatDecimalType(@Nonnull final String[] splitType) {
if (splitType[1] == null && splitType[2] == null) {
return DecimalType.createDecimalType();
} else if (splitType[1] != null) {
final int precision = Integer.parseInt(splitType[1]);
if (splitType[2] == null) {
return DecimalType.createDecimalType(precision);
} else {
return DecimalType.createDecimalType(precision, Integer.parseInt(splitType[2]));
}
} else {
throw new IllegalArgumentException("Illegal definition of a decimal type: " + Arrays.toString(splitType));
}
}
protected Type toMetacatCharType(@Nonnull final String[] splitType) {
if (splitType[1] == null) {
throw new IllegalArgumentException("Must have size for char type");
}
final int size = Integer.parseInt(splitType[1]);
// Check if we're dealing with binary or not
if (splitType[4] != null) {
if (!splitType[4].equals("binary")) {
throw new IllegalArgumentException(
"Unrecognized extra field in char type: " + splitType[4] + ". Expected 'binary'."
);
}
return VarbinaryType.createVarbinaryType(size);
} else {
return CharType.createCharType(size);
}
}
protected Type toMetacatVarcharType(@Nonnull final String[] splitType) {
if (splitType[1] == null) {
throw new IllegalArgumentException("Must have size for varchar type");
}
final int size = Integer.parseInt(splitType[1]);
// Check if we're dealing with binary or not
if (splitType[4] != null) {
if (!splitType[4].equals("binary")) {
throw new IllegalArgumentException(
"Unrecognized extra field in varchar type: " + splitType[4] + ". Expected 'binary'."
);
}
return VarbinaryType.createVarbinaryType(size);
} else {
return VarcharType.createVarcharType(size);
}
}
protected VarbinaryType toMetacatVarbinaryType(@Nonnull final String[] splitType) {
if (!splitType[0].equals("varbinary") && !splitType[0].equals("binary")) {
// Blob
return VarbinaryType.createVarbinaryType(Integer.MAX_VALUE);
}
if (splitType[1] == null) {
throw new IllegalArgumentException("Must have size for varbinary type");
}
return VarbinaryType.createVarbinaryType(Integer.parseInt(splitType[1]));
}
protected Type toMetacatTimeType(@Nonnull final String[] splitType) {
if (splitType[4] != null && splitType[4].equals("with time zone")) {
return BaseType.TIME_WITH_TIME_ZONE;
} else {
return BaseType.TIME;
}
}
protected Type toMetacatTimestampType(@Nonnull final String[] splitType) {
if (splitType[4] != null && splitType[4].equals("with time zone")) {
return BaseType.TIMESTAMP_WITH_TIME_ZONE;
} else {
return BaseType.TIMESTAMP;
}
}
}
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/package-info.java
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes related to getting metadata from a generic JDBC connection.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.jdbc;
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorDatabaseService.java
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
/**
* Generic JDBC implementation of the ConnectorDatabaseService.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
@Getter
public class JdbcConnectorDatabaseService implements ConnectorDatabaseService {
private final DataSource dataSource;
private final JdbcExceptionMapper exceptionMapper;
/**
* Constructor.
*
* @param dataSource The jdbc datasource instance to use to make connections
* @param exceptionMapper The exception mapper to use
*/
@Inject
public JdbcConnectorDatabaseService(
@Nonnull @NonNull final DataSource dataSource,
@Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
) {
this.dataSource = dataSource;
this.exceptionMapper = exceptionMapper;
}
/**
* {@inheritDoc}
*/
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo resource) {
final String databaseName = resource.getName().getDatabaseName();
log.debug("Beginning to create database {} for request {}", databaseName, context);
try (final Connection connection = this.dataSource.getConnection()) {
JdbcConnectorUtils.executeUpdate(connection, "CREATE DATABASE " + databaseName);
log.debug("Finished creating database {} for request {}", databaseName, context);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, resource.getName());
}
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
log.debug("Beginning to drop database {} for request {}", databaseName, context);
try (final Connection connection = this.dataSource.getConnection()) {
JdbcConnectorUtils.executeUpdate(connection, "DROP DATABASE " + databaseName);
log.debug("Finished dropping database {} for request {}", databaseName, context);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
log.debug("Beginning to get database metadata for {} for request {}", databaseName, context);
return DatabaseInfo.builder().name(name).build();
}
/**
* {@inheritDoc}
*/
@Override
public List<DatabaseInfo> list(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String catalogName = name.getCatalogName();
log.debug("Beginning to list database metadata for catalog {} for request {}", catalogName, context);
final ImmutableList.Builder<DatabaseInfo> builder = ImmutableList.builder();
for (final QualifiedName dbName : this.listNames(context, name, prefix, sort, pageable)) {
builder.add(this.get(context, dbName));
}
log.debug("Finished listing database metadata for catalog {} for request {}", catalogName, context);
return builder.build();
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String catalogName = name.getCatalogName();
log.debug("Beginning to list database names for catalog {} for request {}", catalogName, context);
try (final Connection connection = this.dataSource.getConnection()) {
final DatabaseMetaData metaData = connection.getMetaData();
final List<QualifiedName> names = Lists.newArrayList();
try (final ResultSet schemas = prefix == null || StringUtils.isEmpty(prefix.getDatabaseName())
? metaData.getSchemas(connection.getCatalog(), null)
: metaData
.getSchemas(
connection.getCatalog(),
prefix.getDatabaseName() + JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
)
) {
while (schemas.next()) {
final String schemaName = schemas.getString("TABLE_SCHEM").toLowerCase(Locale.ENGLISH);
// skip internal schemas
if (!schemaName.equals("information_schema")) {
names.add(QualifiedName.ofDatabase(name.getCatalogName(), schemaName));
}
}
}
// Does user want sorting?
if (sort != null) {
// We can only really sort by the database name at this level so ignore SortBy field
final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getDatabaseName);
JdbcConnectorUtils.sort(names, sort, comparator);
}
// Does user want pagination?
final List<QualifiedName> results = JdbcConnectorUtils.paginate(names, pageable);
log.debug("Finished listing database names for catalog {} for request {}", catalogName, context);
return results;
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
}
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorTableService.java
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.List;
/**
* Generic JDBC implementation of the ConnectorTableService.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
@Getter
public class JdbcConnectorTableService implements ConnectorTableService {
@SuppressFBWarnings
protected static final String[] TABLE_TYPES = {"TABLE", "VIEW"};
static final String[] TABLE_TYPE = {"TABLE"};
private static final String EMPTY = "";
private static final String COMMA_SPACE = ", ";
private static final String UNSIGNED = "unsigned";
private static final String ZERO = "0";
private static final char LEFT_PAREN = '(';
private static final char RIGHT_PAREN = ')';
private static final char SPACE = ' ';
protected final DataSource dataSource;
protected final JdbcExceptionMapper exceptionMapper;
private final JdbcTypeConverter typeConverter;
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public JdbcConnectorTableService(
@Nonnull @NonNull final DataSource dataSource,
@Nonnull @NonNull final JdbcTypeConverter typeConverter,
@Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
) {
this.dataSource = dataSource;
this.typeConverter = typeConverter;
this.exceptionMapper = exceptionMapper;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
final String tableName = name.getTableName();
log.debug("Attempting to delete table {} from database {} for request {}", tableName, databaseName, context);
try (Connection connection = this.getConnection(name.getDatabaseName())) {
JdbcConnectorUtils.executeUpdate(connection, this.getDropTableSql(name, tableName));
log.debug("Deleted table {} from database {} for request {}", tableName, databaseName, context);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Beginning to get table metadata for qualified name {} for request {}", name, context);
try (Connection connection = this.getConnection(name.getDatabaseName())) {
final ImmutableList.Builder<FieldInfo> fields = ImmutableList.builder();
try (ResultSet columns = this.getColumns(connection, name)) {
while (columns.next()) {
final String type = columns.getString("TYPE_NAME");
final String size = columns.getString("COLUMN_SIZE");
final String precision = columns.getString("DECIMAL_DIGITS");
final String sourceType = this.buildSourceType(type, size, precision);
final FieldInfo.FieldInfoBuilder fieldInfo = FieldInfo.builder()
.name(columns.getString("COLUMN_NAME"))
.sourceType(sourceType)
.type(this.typeConverter.toMetacatType(sourceType))
.comment(columns.getString("REMARKS"))
.isNullable(columns.getString("IS_NULLABLE").equals("YES"))
.defaultValue(columns.getString("COLUMN_DEF"));
if (size != null) {
fieldInfo.size(Integer.parseInt(size));
}
fields.add(fieldInfo.build());
}
}
final List<FieldInfo> fieldInfos = fields.build();
// If table does not exist, throw TableNotFoundException.
if (fieldInfos.isEmpty() && !exists(context, name)) {
throw new TableNotFoundException(name);
}
// Set table details
final TableInfo result = TableInfo.builder().name(name).fields(fields.build()).build();
setTableInfoDetails(connection, result);
log.debug("Finished getting table metadata for qualified name {} for request {}", name, context);
return result;
} catch (final SQLException se) {
throw new ConnectorException(se.getMessage(), se);
}
}
/**
* Set the table info details, if any.
*
* @param connection db connection
* @param tableInfo table info
*/
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
}
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
log.debug("Beginning to list table metadata for {} for request {}", name, context);
final ImmutableList.Builder<TableInfo> builder = ImmutableList.builder();
for (final QualifiedName tableName : this.listNames(context, name, prefix, sort, pageable)) {
builder.add(this.get(context, tableName));
}
log.debug("Finished listing table metadata for {} for request {}", name, context);
return builder.build();
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
log.debug("Beginning to list tables names for qualified name {} for request {}", name, context);
final String catalog = name.getCatalogName();
final String database = name.getDatabaseName();
try (Connection connection = this.getConnection(database)) {
final List<QualifiedName> names = Lists.newArrayList();
try (ResultSet tables = this.getTables(connection, name, prefix)) {
while (tables.next()) {
names.add(QualifiedName.ofTable(catalog, database, tables.getString("TABLE_NAME")));
}
}
// Does user want sorting?
if (sort != null) {
final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getTableName);
JdbcConnectorUtils.sort(names, sort, comparator);
}
// Does user want pagination?
final List<QualifiedName> results = JdbcConnectorUtils.paginate(names, pageable);
log.debug("Finished listing tables names for qualified name {} for request {}", name, context);
return results;
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public void rename(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName
) {
final String oldDatabaseName = oldName.getDatabaseName();
final String newDatabaseName = newName.getDatabaseName();
final String oldTableName = oldName.getTableName();
final String newTableName = newName.getTableName();
log.debug(
"Attempting to re-name table {}/{} to {}/{} for request {}",
oldDatabaseName,
oldTableName,
newDatabaseName,
newTableName,
context
);
if (!oldDatabaseName.equals(newDatabaseName)) {
throw new IllegalArgumentException(
"Database names must match and they are " + oldDatabaseName + " and " + newDatabaseName
);
}
try (Connection connection = this.getConnection(oldDatabaseName)) {
connection.setSchema(oldDatabaseName);
JdbcConnectorUtils.executeUpdate(
connection,
this.getRenameTableSql(oldName, oldTableName, newTableName)
);
log.debug(
"Renamed table {}/{} to {}/{} for request {}",
oldDatabaseName,
oldTableName,
newDatabaseName,
newTableName,
context
);
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, oldName);
}
}
    /**
     * Get a JDBC connection from the data source with its schema set to the given schema name.
     *
     * @param schema The schema name to set on the connection
     * @return The connection with the schema applied
     * @throws SQLException on error getting the connection or setting the schema
     */
    protected Connection getConnection(@Nonnull @NonNull final String schema) throws SQLException {
final Connection connection = this.dataSource.getConnection();
connection.setSchema(schema);
return connection;
}
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
boolean result = false;
try (Connection connection = this.dataSource.getConnection()) {
final String databaseName = name.getDatabaseName();
connection.setSchema(databaseName);
            final DatabaseMetaData metaData = connection.getMetaData();
            // Close the ResultSet when done to avoid leaking the underlying cursor
            try (ResultSet rs = metaData.getTables(databaseName, databaseName, name.getTableName(), TABLE_TYPE)) {
                if (rs.next()) {
                    result = true;
                }
            }
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
return result;
}
/**
* Get the tables. See {@link java.sql.DatabaseMetaData#getTables(String, String, String, String[]) getTables} for
* expected format of the ResultSet columns.
*
* @param connection The database connection to use
* @param name The qualified name of the database to get tables for
* @param prefix An optional database table name prefix to search for
* @return The result set with columns as described in the getTables method from java.sql.DatabaseMetaData
* @throws SQLException on query error
*/
protected ResultSet getTables(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix
) throws SQLException {
final String database = name.getDatabaseName();
final DatabaseMetaData metaData = connection.getMetaData();
return prefix == null || StringUtils.isEmpty(prefix.getTableName())
? metaData.getTables(database, database, null, TABLE_TYPES)
: metaData
.getTables(
database,
database,
prefix.getTableName() + JdbcConnectorUtils.MULTI_CHARACTER_SEARCH,
TABLE_TYPES
);
}
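    // For example (a hedged illustration, not from the source): a prefix with table name "part" is expanded to the
    // SQL search pattern "part%" via JdbcConnectorUtils.MULTI_CHARACTER_SEARCH before DatabaseMetaData#getTables
    // is invoked, so all tables whose names start with "part" are returned.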
/**
* Get the columns for a table. See
* {@link java.sql.DatabaseMetaData#getColumns(String, String, String, String) getColumns} for format of the
* ResultSet columns.
*
* @param connection The database connection to use
* @param name The qualified name of the table to get the column descriptions for
* @return The result set of information
* @throws SQLException on query error
*/
protected ResultSet getColumns(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name
) throws SQLException {
final String database = name.getDatabaseName();
final DatabaseMetaData metaData = connection.getMetaData();
return metaData.getColumns(
database,
database,
name.getTableName(),
JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
);
}
/**
* Rebuild a source type definition.
*
* @param type The base type e.g. VARCHAR
* @param size The size if applicable to the {@code type}
* @param precision The precision if applicable to the {@code type} e.g. DECIMAL's
* @return The representation of source type e.g. INTEGER, VARCHAR(50) or DECIMAL(20, 10)
     * @throws SQLDataException When a non-null {@code size} or {@code precision} can't be parsed as an integer
*/
protected String buildSourceType(
@Nonnull @NonNull final String type,
@Nullable final String size,
@Nullable final String precision
) throws SQLDataException {
if (size != null) {
final int sizeInt;
try {
sizeInt = Integer.parseInt(size);
} catch (final NumberFormatException nfe) {
throw new SQLDataException("Size field could not be converted to integer", nfe);
}
// Make sure if the type is unsigned it's created correctly
final String baseType;
final String afterMagnitude;
final int unsignedIndex = StringUtils.indexOfIgnoreCase(type, UNSIGNED);
if (unsignedIndex != -1) {
baseType = StringUtils.trim(type.substring(0, unsignedIndex));
afterMagnitude = type.substring(unsignedIndex);
} else {
baseType = type;
afterMagnitude = null;
}
if (precision != null) {
final int precisionInt;
try {
precisionInt = Integer.parseInt(precision);
} catch (final NumberFormatException nfe) {
throw new SQLDataException("Precision field could not be converted to integer", nfe);
}
return baseType
+ LEFT_PAREN
+ sizeInt
+ COMMA_SPACE
+ precisionInt
+ RIGHT_PAREN
+ (afterMagnitude != null ? SPACE + afterMagnitude : EMPTY);
} else {
return baseType
+ LEFT_PAREN
+ sizeInt
+ RIGHT_PAREN
+ (afterMagnitude != null ? SPACE + afterMagnitude : EMPTY);
}
} else {
return type;
}
}
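    // Illustrative input/output sketches for buildSourceType (hypothetical values, assuming the paren/comma/space
    // constants hold their conventional single-character values):
    //   buildSourceType("INTEGER", null, null)       -> "INTEGER"
    //   buildSourceType("VARCHAR", "50", null)       -> "VARCHAR(50)"
    //   buildSourceType("DECIMAL", "20", "10")       -> "DECIMAL(20, 10)"
    //   buildSourceType("INT UNSIGNED", "10", null)  -> "INT(10) UNSIGNED"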
/**
     * Build the SQL for renaming a table out of the components provided. The returned SQL is executed as-is.
     *
     * @param oldName The fully qualified name for the current table
     * @param finalOldTableName The string for what the current table should be called in the sql
     * @param finalNewTableName The string for what the new name of the table should be in the sql
* @return The rename table sql to execute
*/
protected String getRenameTableSql(
final QualifiedName oldName,
final String finalOldTableName,
final String finalNewTableName
) {
return "ALTER TABLE " + finalOldTableName + " RENAME TO " + finalNewTableName;
}
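    // For example (a hedged sketch with made-up table names), getRenameTableSql(name, "old_tbl", "new_tbl")
    // produces: ALTER TABLE old_tbl RENAME TO new_tbl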
/**
* Get the SQL for dropping the given table.
*
* @param name The fully qualified name of the table
* @param finalTableName The final table name that should be dropped
* @return The SQL to execute to drop the table
*/
protected String getDropTableSql(final QualifiedName name, final String finalTableName) {
return "DROP TABLE " + finalTableName;
}
}
| 1,620 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorPartitionService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
/**
* Generic JDBC implementation of the ConnectorPartitionService.
*
* @author tgianos
* @since 1.0.0
*/
public class JdbcConnectorPartitionService implements ConnectorPartitionService {
}
| 1,621 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/JdbcConnectorUtils.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.jdbc.services;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
/**
* Utility methods for working with JDBC connections.
*
* @author tgianos
* @since 1.0.0
*/
public final class JdbcConnectorUtils extends ConnectorUtils {
/**
* The string used for multi character search in SQL.
*/
public static final String MULTI_CHARACTER_SEARCH = "%";
/**
* The string used for single character search in SQL.
*/
public static final String SINGLE_CHARACTER_SEARCH = "_";
/**
     * Protected constructor so this utility class is not instantiated directly.
*/
protected JdbcConnectorUtils() {
}
/**
* Execute a SQL update statement against the given datasource.
*
* @param connection The connection to attempt to execute an update against
* @param sql The sql to execute
* @return The number of rows updated or exception
* @throws SQLException on error during execution of the update to the underlying SQL data store
*/
static int executeUpdate(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final String sql
) throws SQLException {
try (final Statement statement = connection.createStatement()) {
return statement.executeUpdate(sql);
}
}
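    // A minimal usage sketch (hypothetical SQL; mirrors how JdbcConnectorTableService invokes this helper):
    //   try (Connection connection = dataSource.getConnection()) {
    //       final int rowsAffected = JdbcConnectorUtils.executeUpdate(connection, "DROP TABLE example_table");
    //   }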
}
| 1,622 |
0 |
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc
|
Create_ds/metacat/metacat-connector-jdbc/src/main/java/com/netflix/metacat/connector/jdbc/services/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Implementations of the Metacat connector service interfaces for generic JDBC connections.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.jdbc.services;
| 1,623 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/Client.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.guava.GuavaModule;
import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule;
import com.google.common.base.Preconditions;
import com.netflix.metacat.client.api.TagV1;
import com.netflix.metacat.client.module.JacksonDecoder;
import com.netflix.metacat.client.module.JacksonEncoder;
import com.netflix.metacat.client.module.MetacatErrorDecoder;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.client.api.MetacatV1;
import com.netflix.metacat.client.api.MetadataV1;
import com.netflix.metacat.client.api.PartitionV1;
import com.netflix.metacat.client.api.ResolverV1;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import feign.Feign;
import feign.Request;
import feign.RequestInterceptor;
import feign.Retryer;
import feign.jaxrs.JAXRSContract;
import feign.slf4j.Slf4jLogger;
import lombok.extern.slf4j.Slf4j;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSocketFactory;
import java.util.concurrent.TimeUnit;
/**
* Client to communicate with Metacat. This version depends on the Feign library.
*
* @author amajumdar
*/
@Slf4j
public final class Client {
private final MetacatV1 api;
private final Feign.Builder feignBuilder;
private final String host;
private final PartitionV1 partitionApi;
private final MetadataV1 metadataApi;
private final ResolverV1 resolverApi;
private final TagV1 tagApi;
private Client(
final String host,
final feign.Client client,
final feign.Logger.Level logLevel,
final RequestInterceptor requestInterceptor,
final Retryer retryer,
final Request.Options options
) {
final MetacatJsonLocator metacatJsonLocator = new MetacatJsonLocator();
final ObjectMapper mapper = metacatJsonLocator
.getPrettyObjectMapper()
.copy()
.registerModule(new GuavaModule())
.registerModule(new JaxbAnnotationModule());
log.info("Connecting to {}", host);
this.host = host;
feignBuilder = Feign.builder()
.client(client)
.logger(new Slf4jLogger())
.logLevel(logLevel)
.contract(new JAXRSContract())
.encoder(new JacksonEncoder(mapper))
.decoder(new JacksonDecoder(mapper))
.errorDecoder(new MetacatErrorDecoder(metacatJsonLocator))
.requestInterceptor(requestInterceptor)
.retryer(retryer)
.options(options);
api = getApiClient(MetacatV1.class);
partitionApi = getApiClient(PartitionV1.class);
metadataApi = getApiClient(MetadataV1.class);
resolverApi = getApiClient(ResolverV1.class);
tagApi = getApiClient(TagV1.class);
}
/**
* Returns the client builder.
*
* @return Builder to create the metacat client
*/
public static Builder builder() {
return new Builder();
}
/**
* Returns an API instance that conforms to the given API Type that can communicate with the Metacat server.
*
     * @param apiType A JAX-RS annotated Metacat interface
* @param <T> API Resource instance
* @return An instance that implements the given interface and is wired up to communicate with the Metacat server.
*/
public <T> T getApiClient(final Class<T> apiType) {
Preconditions.checkArgument(apiType.isInterface(), "apiType must be an interface");
return feignBuilder.target(apiType, host);
}
/**
* Return an API instance that can be used to interact with the metacat server.
*
* @return An instance api conforming to MetacatV1 interface
*/
public MetacatV1 getApi() {
return api;
}
/**
* Return an API instance that can be used to interact with the metacat server for partitions.
*
* @return An instance api conforming to PartitionV1 interface
*/
public PartitionV1 getPartitionApi() {
return partitionApi;
}
/**
* Return an API instance that can be used to interact with the metacat server for only user metadata.
*
* @return An instance api conforming to MetadataV1 interface
*/
public MetadataV1 getMetadataApi() {
return metadataApi;
}
/**
* Return an API instance that can be used to interact with
* the metacat server for getting the qualified name by uri.
*
* @return An instance api conforming to ResolverV1 interface
*/
public ResolverV1 getResolverApi() {
return resolverApi;
}
/**
* Return an API instance that can be used to interact with
* the metacat server for tagging metadata.
* @return An instance api conforming to TagV1 interface
*/
public TagV1 getTagApi() {
return tagApi;
}
/**
* Builder class to build the metacat client.
*/
public static class Builder {
private String host;
private String userName;
private feign.Client client;
private String clientAppName;
private String jobId;
private String dataTypeContext;
private feign.Logger.Level logLevel;
private Retryer retryer;
private RequestInterceptor requestInterceptor;
private Request.Options requestOptions;
private SSLSocketFactory sslSocketFactory;
private HostnameVerifier hostnameVerifier;
/**
* Sets the SSLSocketFactory. This field is ignored when the full Feign client is specified.
*
* @param sslFactory the SSLSocketFactory
* @return Builder
*/
public Builder withSSLSocketFactory(final SSLSocketFactory sslFactory) {
this.sslSocketFactory = sslFactory;
return this;
}
/**
* Sets the HostnameVerifier. This field is ignored when the full Feign client is specified.
*
* @param hostVerifier the HostnameVerifier
* @return Builder
*/
public Builder withHostnameVerifier(final HostnameVerifier hostVerifier) {
this.hostnameVerifier = hostVerifier;
return this;
}
/**
* Sets the log level for the client.
*
* @param clientLogLevel log level
* @return Builder
*/
public Builder withLogLevel(final feign.Logger.Level clientLogLevel) {
this.logLevel = clientLogLevel;
return this;
}
/**
* Sets the server host name.
*
* @param serverHost server host to connect
* @return Builder
*/
public Builder withHost(final String serverHost) {
this.host = serverHost;
return this;
}
/**
* Sets the retryer logic for the client.
*
* @param clientRetryer retry implementation
* @return Builder
*/
public Builder withRetryer(final Retryer clientRetryer) {
this.retryer = clientRetryer;
return this;
}
/**
* Sets the user name to pass in the request header.
*
* @param requestUserName user name
* @return Builder
*/
public Builder withUserName(final String requestUserName) {
this.userName = requestUserName;
return this;
}
/**
* Sets the application name to pass in the request header.
*
* @param appName application name
* @return Builder
*/
public Builder withClientAppName(final String appName) {
this.clientAppName = appName;
return this;
}
/**
* Sets the job id to pass in the request header.
*
* @param clientJobId job id
* @return Builder
*/
public Builder withJobId(final String clientJobId) {
this.jobId = clientJobId;
return this;
}
/**
* Sets the Client implementation to use.
*
* @param feignClient Feign Client
* @return Builder
*/
public Builder withClient(final feign.Client feignClient) {
this.client = feignClient;
return this;
}
/**
* Sets the data type context to pass in the request header.
*
         * @param requestDataTypeContext Data type context
* @return Builder
*/
public Builder withDataTypeContext(final String requestDataTypeContext) {
this.dataTypeContext = requestDataTypeContext;
return this;
}
/**
* Sets the request interceptor.
*
         * @param clientRequestInterceptor request interceptor
         * @return Builder
         */
        public Builder withRequestInterceptor(final RequestInterceptor clientRequestInterceptor) {
            this.requestInterceptor = clientRequestInterceptor;
return this;
}
/**
* Sets the request options.
*
* @param clientRequestOptions request options
* @return Builder
*/
public Builder withRequestOptions(final Request.Options clientRequestOptions) {
this.requestOptions = clientRequestOptions;
return this;
}
/**
* Builds the Metacat client.
*
* @return Client that can be used to make metacat API calls.
*/
public Client build() {
Preconditions.checkArgument(userName != null, "User name cannot be null");
Preconditions.checkArgument(clientAppName != null, "Client application name cannot be null");
if (host == null) {
host = System.getProperty("netflix.metacat.host", System.getenv("NETFLIX_METACAT_HOST"));
}
Preconditions.checkArgument(host != null, "Host cannot be null");
if (retryer == null) {
//
                // Retry exponentially, starting at a 500ms delay and capping at 2 minutes, for up to 3 attempts.
//
retryer = new Retryer.Default(TimeUnit.MILLISECONDS.toMillis(500), TimeUnit.MINUTES.toMillis(2), 3);
}
final RequestInterceptor interceptor = template -> {
template.header(MetacatRequestContext.HEADER_KEY_USER_NAME, userName);
template.header(MetacatRequestContext.HEADER_KEY_CLIENT_APP_NAME, clientAppName);
template.header(MetacatRequestContext.HEADER_KEY_JOB_ID, jobId);
template.header(MetacatRequestContext.HEADER_KEY_DATA_TYPE_CONTEXT, dataTypeContext);
if (requestInterceptor != null) {
requestInterceptor.apply(template);
}
};
if (requestOptions == null) {
//
// connection timeout: 30secs, socket timeout: 60secs
//
requestOptions = new Request.Options((int) TimeUnit.SECONDS.toMillis(30),
(int) TimeUnit.MINUTES.toMillis(1));
}
if (logLevel == null) {
logLevel = feign.Logger.Level.BASIC;
}
if (client == null) {
client = new feign.Client.Default(sslSocketFactory, hostnameVerifier);
}
return new Client(host, client, logLevel, interceptor, retryer, requestOptions);
}
}
}
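// A minimal builder usage sketch (illustrative only; the host, user name and application name below are hypothetical):
//
//   final Client client = Client.builder()
//       .withHost("http://localhost:8080")
//       .withUserName("jdoe")
//       .withClientAppName("example-app")
//       .build();
//   final MetacatV1 metacatApi = client.getApi();
//   final PartitionV1 partitionApi = client.getPartitionApi();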
| 1,624 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Client library for Metacat.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.client;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,625 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonDecoder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.module;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.RuntimeJsonMappingException;
import feign.Response;
import feign.codec.Decoder;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.Reader;
import java.lang.reflect.Type;
import java.net.HttpURLConnection;
/**
* Decoder for Metacat response.
*
* @author amajumdar
*/
public class JacksonDecoder implements Decoder {
private static final String NO_CONTENT_MESSAGE = "No content to map due to end-of-input";
private final ObjectMapper mapper;
/**
* Constructor.
*
* @param mapper Jackson mapper for Metacat response.
*/
public JacksonDecoder(@Nonnull @NonNull final ObjectMapper mapper) {
this.mapper = mapper;
}
/**
* {@inheritDoc}
*/
@Override
public Object decode(final Response response, final Type type) throws IOException {
if (
response.status() == HttpURLConnection.HTTP_NO_CONTENT
|| response.body() == null
|| (response.body().length() != null && response.body().length() == 0)
) {
return null;
}
try (final Reader reader = response.body().asReader()) {
return this.mapper.readValue(reader, this.mapper.constructType(type));
} catch (final JsonMappingException jme) {
            // Handle the case where (most likely due to bad design) the server returned OK but the body had no
            // content to de-serialize (i.e. the response status should really have been 204 No Content)
if (response.status() == HttpURLConnection.HTTP_OK
&& jme.getMessage().startsWith(NO_CONTENT_MESSAGE)) {
return null;
}
throw jme;
} catch (final RuntimeJsonMappingException e) {
if (e.getCause() != null && e.getCause() instanceof IOException) {
throw IOException.class.cast(e.getCause());
}
throw e;
}
}
}
| 1,626 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/MetacatErrorDecoder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.module;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Strings;
import com.netflix.metacat.common.exception.MetacatAlreadyExistsException;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.exception.MetacatPreconditionFailedException;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.exception.MetacatUnAuthorizedException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonException;
import feign.Response;
import feign.RetryableException;
import feign.Util;
import lombok.AllArgsConstructor;
import java.io.IOException;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Date;
/**
* Module that provides a error decoder, used to parse errors.
*
* @author amajumdar
*/
@AllArgsConstructor
public class MetacatErrorDecoder extends feign.codec.ErrorDecoder.Default {
private final MetacatJson metacatJson;
/**
* {@inheritDoc}
*/
@Override
public Exception decode(final String methodKey, final Response response) {
try {
String message = "";
if (response.body() != null) {
message = Util.toString(response.body().asReader());
try {
final ObjectNode body = metacatJson.parseJsonObject(message);
message = body.path("error").asText();
if (Strings.isNullOrEmpty(message)) {
message = body.path("message").asText("No error message supplied.");
}
} catch (final MetacatJsonException ignored) {
}
}
switch (response.status()) {
case 501: //NOT IMPLEMENTED
case 415: //UNSUPPORTED_MEDIA_TYPE
return new MetacatNotSupportedException(message);
case 400: //BAD_REQUEST
return new MetacatBadRequestException(message);
case 403: //Forbidden
return new MetacatUnAuthorizedException(message);
case 404: //NOT_FOUND
return new MetacatNotFoundException(message);
case 409: //CONFLICT
return new MetacatAlreadyExistsException(message);
case 412: // PRECONDITION_FAILED
return new MetacatPreconditionFailedException(message);
case 429:
return new RetryableException(response.status(), message,
response.request() == null ? null : response.request().httpMethod(),
new MetacatTooManyRequestsException(message),
Date.from(Instant.now().plus(1, ChronoUnit.MINUTES)), response.request());
case 500: //INTERNAL_SERVER_ERROR
case 503: //SERVICE_UNAVAILABLE
return new RetryableException(response.status(), message,
response.request() == null ? null : response.request().httpMethod(),
new MetacatException(message), null, response.request());
default:
return new MetacatException(message);
}
} catch (final IOException e) {
return super.decode(methodKey, response);
}
}
}
| 1,627 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonEncoder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.module;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import feign.RequestTemplate;
import feign.codec.EncodeException;
import feign.codec.Encoder;
import java.lang.reflect.Type;
/**
* Encoder for Metacat request.
*
* @author amajumdar
*/
public class JacksonEncoder implements Encoder {
private final ObjectMapper mapper;
/**
* Constructor.
*
* @param mapper Jackson mapper for Metacat request
*/
public JacksonEncoder(final ObjectMapper mapper) {
this.mapper = mapper;
}
/**
* Converts objects to an appropriate representation in the template.
*
* @param object what to encode as the request body.
* @param bodyType the type the object should be encoded as. {@code Map<String, ?>}, if form
* encoding.
* @param template the request template to populate.
* @throws feign.codec.EncodeException when encoding failed due to a checked exception.
*/
@Override
public void encode(final Object object, final Type bodyType, final RequestTemplate template)
throws EncodeException {
try {
template.body(mapper.writeValueAsString(object));
} catch (JsonProcessingException e) {
throw new EncodeException(e.getMessage(), e);
}
}
}
| 1,628 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/module/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Client Feign related library for Metacat.
*
* @author amajumdar
*/
package com.netflix.metacat.client.module;
| 1,629 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/TagV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TagCreateRequestDto;
import com.netflix.metacat.common.dto.TagRemoveRequestDto;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
import java.util.Set;
/**
* APIs to manipulate the tags.
*
* @author amajumdar
*/
@Path("mds/v1/tag")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface TagV1 {
/**
* Return the list of tags.
*
* @return list of tags
*/
@GET
@Path("tags")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Set<String> getTags();
/**
* Returns the list of qualified names for the given input.
*
     * @param includeTags Set of tags that must match
     * @param excludeTags Set of tags that must not match
* @param sourceName Prefix of the source name
* @param databaseName Prefix of the database name
* @param tableName Prefix of the table name
* @param type Qualified name type category, database, table
* @return list of qualified names
*/
@GET
@Path("list")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> list(
@QueryParam("include")
Set<String> includeTags,
@QueryParam("exclude")
Set<String> excludeTags,
@QueryParam("sourceName")
String sourceName,
@QueryParam("databaseName")
String databaseName,
@QueryParam("tableName")
String tableName,
@QueryParam("type")
QualifiedName.Type type
);
/**
* Returns the list of qualified names that are tagged with tags containing the given tagText.
*
* @param tag Tag partial text
* @param sourceName Prefix of the source name
* @param databaseName Prefix of the database name
* @param tableName Prefix of the table name
* @return list of qualified names
*/
@GET
@Path("search")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> search(
@QueryParam("tag")
String tag,
@QueryParam("sourceName")
String sourceName,
@QueryParam("databaseName")
String databaseName,
@QueryParam("tableName")
String tableName
);
/**
* Sets the tags on the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param tags set of tags
* @return set of tags
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Set<String> setTableTags(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
Set<String> tags
);
/**
* Sets the tags on the given qualified name.
*
* @param tagCreateRequestDto tag create request dto
* @return set of tags
*/
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Set<String> setTags(
TagCreateRequestDto tagCreateRequestDto
);
/**
* Remove the tags on the given qualified name.
*
* @param tagRemoveRequestDto tag remove request dto
*/
@DELETE
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void removeTags(
TagRemoveRequestDto tagRemoveRequestDto
);
/**
* Remove the tags from the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param deleteAll True if all tags need to be removed
* @param tags Tags to be removed from the given table
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void removeTableTags(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@DefaultValue("false")
@QueryParam("all")
Boolean deleteAll,
Set<String> tags
);
}
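// A hedged usage sketch over a Feign-backed client (catalog, database, table names and tag values are hypothetical),
// assuming a Client built as shown in com.netflix.metacat.client.Client:
//
//   final TagV1 tagApi = client.getTagApi();
//   tagApi.setTableTags("prodhive", "example_db", "example_table",
//       com.google.common.collect.Sets.newHashSet("pii", "audited"));
//   tagApi.removeTableTags("prodhive", "example_db", "example_table", false,
//       com.google.common.collect.Sets.newHashSet("audited"));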
| 1,630 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/SearchMetacatV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.dto.TableDto;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
/**
* Search APIs for metacat that queries the search store.
* @author amajumdar
*/
@Path("mds/v1/search")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface SearchMetacatV1 {
/**
* Searches the list of tables for the given search string.
* @param searchString search string
* @return list of tables
*/
@GET
@Path("table")
@Consumes(MediaType.APPLICATION_JSON)
List<TableDto> searchTables(
@QueryParam("q")
String searchString
);
}
| 1,631 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/PartitionV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.SortOrder;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
/**
* Metacat API for managing partition.
*
* @author amajumdar
*/
@Path("mds/v1/partition")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface PartitionV1 {
/**
* Delete named partitions from a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
     * @param partitionIds list of partition names
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deletePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
List<String> partitionIds
);
/**
* Delete partitions for the given view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName metacat view name
* @param partitionIds list of partition names
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deletePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
List<String> partitionIds
);
/**
* Return list of partitions for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @return list of partitions for a table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata
);
/**
* Return list of partitions for a metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @return list of partitions for a metacat view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata
);
/**
* Return list of partitions for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @param getPartitionsRequestDto request
* @return list of partitions for a table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitionsForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partitions for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param includeUserMetadata whether to include user metadata for every partition in the list
* @param getPartitionsRequestDto request
* @return list of partitions for a view
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<PartitionDto> getPartitionsForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition names for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition names for a table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeys(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition names for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition names for a view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeys(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition names for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition names for a table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeysForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition names for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition names for a view
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionKeysForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition uris for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @return list of partition uris for a table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUris(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
     * Return list of partition uris for a metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param filter filter expression
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
     * @return list of partition uris for a metacat view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUris(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("filter")
String filter,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit
);
/**
* Return list of partition uris for a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition uris for a table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUrisForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Return list of partition uris for a view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param sortBy sort by this name
* @param sortOrder sort order to use
* @param offset offset of the list
* @param limit size of the list
* @param getPartitionsRequestDto request
* @return list of partition uris for a view
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris-request")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<String> getPartitionUrisForRequest(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
GetPartitionsRequestDto getPartitionsRequestDto
);
/**
* Add/update partitions to the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param partitionsSaveRequestDto partition request containing the list of partitions to be added/updated
* @return Response with the number of partitions added/updated
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
PartitionsSaveResponseDto savePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
PartitionsSaveRequestDto partitionsSaveRequestDto
);
/**
* Add/update partitions to the given metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param partitionsSaveRequestDto partition request containing the list of partitions to be added/updated
* @return Response with the number of partitions added/updated
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
PartitionsSaveResponseDto savePartitions(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
PartitionsSaveRequestDto partitionsSaveRequestDto
);
/**
* Get the partition count for the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return partition count for the given table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/count")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Integer getPartitionCount(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Get the partition count for the given metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return partition count for the given view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/count")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
Integer getPartitionCount(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName
);
}
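// A hedged usage sketch (catalog/database/table names, filter and paging values are hypothetical), assuming a Client
// built as shown in com.netflix.metacat.client.Client:
//
//   final PartitionV1 partitionApi = client.getPartitionApi();
//   final List<PartitionDto> partitions = partitionApi.getPartitions(
//       "prodhive", "example_db", "example_table",
//       "dateint==20170101", "dateint", SortOrder.ASC, 0, 100, false);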
| 1,632 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/MetacatV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.CreateCatalogDto;
import com.netflix.metacat.common.dto.DatabaseCreateRequestDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.HEAD;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
/**
* Metacat API for managing catalog/database/table/mview.
*
* @author amajumdar
*/
@Path("mds/v1")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface MetacatV1 {
/**
* Creates a new catalog.
*
* @param createCatalogDto catalog
*/
@POST
@Path("catalog")
void createCatalog(CreateCatalogDto createCatalogDto);
/**
* Creates the given database in the given catalog.
*
* @param catalogName catalog name
* @param databaseName database name
* @param databaseCreateRequestDto database create request
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void createDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
DatabaseCreateRequestDto databaseCreateRequestDto
);
/**
* Creates a table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param table TableDto with table details
* @return created <code>TableDto</code> table
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto createTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
TableDto table
);
/**
     * Creates a metacat view, a staging table that can contain partitions referring to the table partition locations.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param snapshot boolean to snapshot or not
* @param filter filter expression to use
* @return created <code>TableDto</code> mview
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto createMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
@DefaultValue("false")
@QueryParam("snapshot")
Boolean snapshot,
@QueryParam("filter")
String filter
);
/**
* Deletes the given database from the given catalog.
*
* @param catalogName catalog name
* @param databaseName database name
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deleteDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName
);
/**
* Delete table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return deleted <code>TableDto</code> table.
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto deleteTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Delete metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return deleted <code>TableDto</code> mview.
*/
@DELETE
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto deleteMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName
);
/**
* Get the catalog by name.
*
* @param catalogName catalog name
* @return catalog
*/
@GET
@Path("catalog/{catalog-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
CatalogDto getCatalog(
@PathParam("catalog-name")
String catalogName
);
/**
* List registered catalogs.
*
* @return registered catalogs.
*/
@GET
@Path("catalog")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<CatalogMappingDto> getCatalogNames();
/**
* Get the database with the list of table names under it.
*
* @param catalogName catalog name
* @param databaseName database name
* @param includeUserMetadata true if details should include user metadata
* @param includeTableNames if true, include the list of table names
* @return database with details
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
DatabaseDto getDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@DefaultValue("true")
@QueryParam("includeUserMetadata")
Boolean includeUserMetadata,
@DefaultValue("true")
@QueryParam("includeTableNames")
Boolean includeTableNames
);
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
     * @param includeDefinitionMetadata true if the definition metadata is to be included
     * @param includeDataMetadata true if the data metadata is to be included
* @return table
*/
default TableDto getTable(
String catalogName,
String databaseName,
String tableName,
Boolean includeInfo,
Boolean includeDefinitionMetadata,
Boolean includeDataMetadata
) {
return getTable(catalogName, databaseName, tableName, includeInfo,
includeDefinitionMetadata, includeDataMetadata, false);
}
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
     * @param includeDefinitionMetadata true if the definition metadata is to be included
     * @param includeDataMetadata true if the data metadata is to be included
     * @param includeInfoDetails true if the additional info details are to be included
* @return table
*/
default TableDto getTable(
String catalogName,
String databaseName,
String tableName,
Boolean includeInfo,
Boolean includeDefinitionMetadata,
Boolean includeDataMetadata,
Boolean includeInfoDetails
) {
return getTable(catalogName, databaseName, tableName, includeInfo,
includeDefinitionMetadata, includeDataMetadata, includeInfoDetails, false);
}
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
     * @param includeDefinitionMetadata true if the definition metadata is to be included
     * @param includeDataMetadata true if the data metadata is to be included
     * @param includeInfoDetails true if the additional info details are to be included
* @param includeMetadataLocationOnly true if only metadata location needs to be included.
* All other flags are ignored if this is set to true.
* @return table
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto getTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@DefaultValue("true")
@QueryParam("includeInfo")
Boolean includeInfo,
@DefaultValue("true")
@QueryParam("includeDefinitionMetadata")
Boolean includeDefinitionMetadata,
@DefaultValue("true")
@QueryParam("includeDataMetadata")
Boolean includeDataMetadata,
@DefaultValue("false")
@QueryParam("includeInfoDetails")
Boolean includeInfoDetails,
@DefaultValue("false")
@QueryParam("includeMetadataLocationOnly")
Boolean includeMetadataLocationOnly
);
/**
     * Returns true if the table exists.
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
     * @return true if the table exists.
*/
default boolean doesTableExist(String catalogName, String databaseName, String tableName) {
boolean result = true;
try {
tableExists(catalogName, databaseName, tableName);
} catch (MetacatNotFoundException e) {
result = false;
}
return result;
}
/**
* Check if the table exists.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
*/
@HEAD
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
void tableExists(@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName);
/**
* Returns a filtered list of table names.
* @param catalogName catalog name
* @param filter filter expression
* @param limit list size
* @return list of table names
*/
@GET
@Path("catalog/{catalog-name}/table-names")
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> getTableNames(
@PathParam("catalog-name")
final String catalogName,
@QueryParam("filter")
final String filter,
@QueryParam("limit")
Integer limit
);
/**
* Returns a filtered list of table names.
* @param catalogName catalog name
* @param databaseName database name
* @param filter filter expression
* @param limit list size
* @return list of table names
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table-names")
@Produces(MediaType.APPLICATION_JSON)
List<QualifiedName> getTableNames(
@PathParam("catalog-name")
final String catalogName,
@PathParam("database-name")
final String databaseName,
@QueryParam("filter")
final String filter,
@QueryParam("limit")
Integer limit
);
/**
* List of metacat view names.
*
* @param catalogName catalog name
* @return list of metacat view names.
*/
@GET
@Path("catalog/{catalog-name}/mviews")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<NameDateDto> getMViews(
@PathParam("catalog-name")
String catalogName
);
/**
* List of metacat view names.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return List of metacat view names.
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mviews")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
List<NameDateDto> getMViews(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName
);
/**
* Get metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return metacat view
*/
@GET
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto getMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName
);
/**
* Rename table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param newTableName new table name
*/
@POST
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/rename")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void renameTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@QueryParam("newTableName")
String newTableName
);
/**
* Updates an existing catalog.
*
* @param catalogName catalog name
* @param createCatalogDto catalog
*/
@PUT
@Path("catalog/{catalog-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void updateCatalog(
@PathParam("catalog-name")
String catalogName,
CreateCatalogDto createCatalogDto
);
/**
* Updates the given database in the given catalog.
*
* @param catalogName catalog name.
* @param databaseName database name.
* @param databaseUpdateRequestDto database
*/
@PUT
@Path("catalog/{catalog-name}/database/{database-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void updateDatabase(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
DatabaseCreateRequestDto databaseUpdateRequestDto
);
/**
* Update metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param table view
* @return updated metacat view
*/
@PUT
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto updateMView(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
@PathParam("view-name")
String viewName,
TableDto table
);
/**
* Update table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param table table
* @return table
*/
@PUT
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
TableDto updateTable(
@PathParam("catalog-name")
String catalogName,
@PathParam("database-name")
String databaseName,
@PathParam("table-name")
String tableName,
TableDto table
);
}
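
A minimal usage sketch (not part of the metacat sources) of the MetacatV1 interface above, assuming a client handle has already been obtained (for example as a JAX-RS client proxy). The catalog/database/table names are illustrative, and constructing the request DTOs with their default constructors is an assumption.

import com.netflix.metacat.client.api.MetacatV1;
import com.netflix.metacat.common.dto.DatabaseCreateRequestDto;
import com.netflix.metacat.common.dto.TableDto;

/**
 * Minimal sketch: create a database and a table, then read the table back.
 */
public final class MetacatV1UsageSketch {
    private MetacatV1UsageSketch() {
    }

    public static TableDto createAndFetch(final MetacatV1 api) {
        // Default-constructed request DTOs are an assumption; callers normally populate fields first.
        api.createDatabase("prodhive", "example_db", new DatabaseCreateRequestDto());
        api.createTable("prodhive", "example_db", "example_table", new TableDto());

        // Existence check built on the HEAD endpoint; maps MetacatNotFoundException to false.
        System.out.println(api.doesTableExist("prodhive", "example_db", "example_table"));

        // Fetch with info and definition metadata, but without data metadata.
        return api.getTable("prodhive", "example_db", "example_table", true, true, false);
    }
}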
| 1,633 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/ResolverV1.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.dto.ResolveByUriRequestDto;
import com.netflix.metacat.common.dto.ResolveByUriResponseDto;
import javax.ws.rs.Consumes;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * Resolver API for resolving metacat entities by URI.
*
* @author zhenl
* @since 1.0.0
*/
@Path("mds/v1/resolver")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface ResolverV1 {
/**
     * Resolves the given URI to the metacat entities that reference it.
*
* @param prefixSearch use prefix search
* @param resolveByUriRequestDto resolveByUriRequest
* @return response from uri search
*/
@POST
ResolveByUriResponseDto resolveByUri(
@DefaultValue("false")
@QueryParam("prefixSearch")
Boolean prefixSearch,
ResolveByUriRequestDto resolveByUriRequestDto);
/**
     * Checks whether the given URI is used by more than one entity.
*
* @param prefixSearch use prefix search
* @param resolveByUriRequestDto resolveByUriRequest
     * @return response indicating whether the uri is used more than once
*/
@POST
@Path("isUriUsedMoreThanOnce")
Response isUriUsedMoreThanOnce(
@DefaultValue("false")
@QueryParam("prefixSearch")
Boolean prefixSearch,
ResolveByUriRequestDto resolveByUriRequestDto);
}
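
A minimal usage sketch (not part of the metacat sources) of the resolver endpoint above. The setUri accessor on the request DTO is an assumption, since the DTO definition is not part of this file.

import com.netflix.metacat.client.api.ResolverV1;
import com.netflix.metacat.common.dto.ResolveByUriRequestDto;
import com.netflix.metacat.common.dto.ResolveByUriResponseDto;

/**
 * Minimal sketch: resolve the entities that point at a given location URI.
 */
public final class ResolverV1UsageSketch {
    private ResolverV1UsageSketch() {
    }

    public static ResolveByUriResponseDto resolve(final ResolverV1 resolver, final String uri) {
        final ResolveByUriRequestDto request = new ResolveByUriRequestDto();
        request.setUri(uri); // accessor assumed; the DTO fields are defined elsewhere
        // prefixSearch = false: only exact URI matches are returned.
        return resolver.resolveByUri(false, request);
    }
}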
| 1,634 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/MetadataV1.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.client.api;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DataMetadataDto;
import com.netflix.metacat.common.dto.DataMetadataGetRequestDto;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.dto.SortOrder;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import java.util.List;
import java.util.Set;
/**
* API to manipulate user metadata.
*
* @author amajumdar
*/
@Path("mds/v1/metadata")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public interface MetadataV1 {
/**
* Returns the data metadata.
*
* @param metadataGetRequestDto metadata request
* @return data metadata
*/
@POST
@Path("data")
DataMetadataDto getDataMetadata(DataMetadataGetRequestDto metadataGetRequestDto);
/**
* Returns the list of definition metadata.
*
* @param sortBy Sort the list by this value
* @param sortOrder Sorting order to use
* @param offset Offset of the list returned
* @param limit Size of the list
* @param lifetime has lifetime set
* @param type Type of the metadata item. Values: database, table, partition
* @param name Text that matches the name of the metadata (accepts sql wildcards)
* @param dataProperties Set of data property names.
* Filters the returned list that only contains the given property names
* @return list of definition metadata
*/
@GET
@Path("definition/list")
List<DefinitionMetadataDto> getDefinitionMetadataList(
@QueryParam("sortBy")
String sortBy,
@QueryParam("sortOrder")
SortOrder sortOrder,
@QueryParam("offset")
Integer offset,
@QueryParam("limit")
Integer limit,
@DefaultValue("false")
@QueryParam("lifetime")
Boolean lifetime,
@QueryParam("type")
String type,
@QueryParam("name")
String name,
@QueryParam("data-property")
Set<String> dataProperties
);
/**
* Returns the list of qualified names owned by the given owners.
*
* @param owners set of owners
* @return the list of qualified names owned by the given owners
*/
@GET
@Path("searchByOwners")
List<QualifiedName> searchByOwners(
@QueryParam("owner")
Set<String> owners
);
/**
* Delete the definition metadata for the given name.
*
* @param name Name of definition metadata to be deleted
* @param force If true, deletes the metadata without checking if the database/table/partition exists
*/
@DELETE
@Path("definition")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
void deleteDefinitionMetadata(
@QueryParam("name")
String name,
@DefaultValue("false")
@QueryParam("force")
Boolean force
);
}
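
A minimal usage sketch (not part of the metacat sources) for the metadata endpoints above. The sort field, qualified-name strings, and data property name are illustrative, and SortOrder.ASC is assumed to be a valid enum constant.

import com.netflix.metacat.client.api.MetadataV1;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.dto.SortOrder;

import java.util.Collections;
import java.util.List;

/**
 * Minimal sketch: list table-level definition metadata and force-delete one stale entry.
 */
public final class MetadataV1UsageSketch {
    private MetadataV1UsageSketch() {
    }

    public static List<DefinitionMetadataDto> listTableMetadata(final MetadataV1 metadata) {
        // List up to 100 table-level entries whose name matches the SQL wildcard (values are illustrative).
        final List<DefinitionMetadataDto> result = metadata.getDefinitionMetadataList(
            "name", SortOrder.ASC, 0, 100, false, "table", "prodhive/example_db/%",
            Collections.singleton("data_hygiene"));
        // Force-delete the definition metadata of a dropped table without checking that the table exists.
        metadata.deleteDefinitionMetadata("prodhive/example_db/dropped_table", true);
        return result;
    }
}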
| 1,635 |
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* API package for Metacat.
*
* @author amajumdar
*/
package com.netflix.metacat.client.api;
| 1,636 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.util.Map;
/**
* MySql implementation of a connector factory.
*
* @author tgianos
* @since 1.0.0
*/
class MySqlConnectorFactory extends DefaultConnectorFactory {
/**
* Constructor.
*
* @param name catalog name
* @param catalogShardName catalog shard name
* @param configuration catalog configuration
*/
MySqlConnectorFactory(
@Nonnull @NonNull final String name,
@Nonnull @NonNull final String catalogShardName,
@Nonnull @NonNull final Map<String, String> configuration
) {
super(name, catalogShardName, Lists.newArrayList(new MySqlConnectorModule(catalogShardName, configuration)));
}
}
| 1,637 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.SQLException;
/**
* Convert MySQL exceptions into generic connector exceptions for use higher up in the system.
*
* @author tgianos
* @author zhenl
* @see SQLException
* @see ConnectorException
* @see <a href="https://dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-error-sqlstates.html">MySQL Ref</a>
* @since 1.0.0
*/
public class MySqlExceptionMapper implements JdbcExceptionMapper {
/**
* {@inheritDoc}
*/
@Override
public ConnectorException toConnectorException(
@NonNull @Nonnull final SQLException se,
@Nonnull @NonNull final QualifiedName name
) {
switch (se.getErrorCode()) {
case 1007: //database already exists
return new DatabaseAlreadyExistsException(name, se);
case 1050: //table already exists
return new TableAlreadyExistsException(name, se);
case 1008: //database does not exist
return new DatabaseNotFoundException(name, se);
case 1146: //table doesn't exist
return new TableNotFoundException(name, se);
default:
return new ConnectorException(se.getMessage(), se);
}
}
}
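
A minimal sketch (not part of the metacat sources) that feeds a synthetic SQLException with MySQL vendor code 1146 through the mapper above; the table and catalog names are illustrative.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.mysql.MySqlExceptionMapper;

import java.sql.SQLException;

/**
 * Minimal sketch: vendor code 1146 (table doesn't exist) maps to TableNotFoundException.
 */
public final class MySqlExceptionMapperSketch {
    private MySqlExceptionMapperSketch() {
    }

    public static void main(final String[] args) {
        final MySqlExceptionMapper mapper = new MySqlExceptionMapper();
        final SQLException se =
            new SQLException("Table 'example_db.example_table' doesn't exist", "42S02", 1146);
        final ConnectorException ce = mapper.toConnectorException(
            se, QualifiedName.ofTable("mysql_catalog", "example_db", "example_table"));
        // Prints true: the switch above returns TableNotFoundException for error code 1146.
        System.out.println(ce instanceof TableNotFoundException);
    }
}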
| 1,638 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorModule.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorPartitionService;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.sql.DataSource;
import java.util.Map;
/**
* A Guice Module for the MySqlConnector.
*
* @author tgianos
* @since 1.0.0
*/
public class MySqlConnectorModule extends AbstractModule {
private final String catalogShardName;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param catalogShardName catalog shard name
* @param configuration connector configuration
*/
MySqlConnectorModule(
@Nonnull @NonNull final String catalogShardName,
@Nonnull @NonNull final Map<String, String> configuration
) {
this.catalogShardName = catalogShardName;
this.configuration = configuration;
}
/**
* {@inheritDoc}
*/
@Override
protected void configure() {
this.bind(DataSource.class).toInstance(DataSourceManager.get()
.load(this.catalogShardName, this.configuration).get(this.catalogShardName));
this.bind(JdbcTypeConverter.class).to(MySqlTypeConverter.class).in(Scopes.SINGLETON);
this.bind(JdbcExceptionMapper.class).to(MySqlExceptionMapper.class).in(Scopes.SINGLETON);
this.bind(ConnectorDatabaseService.class)
.to(ConnectorUtils.getDatabaseServiceClass(this.configuration, MySqlConnectorDatabaseService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorTableService.class)
.to(ConnectorUtils.getTableServiceClass(this.configuration, MySqlConnectorTableService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorPartitionService.class)
.to(ConnectorUtils.getPartitionServiceClass(this.configuration, JdbcConnectorPartitionService.class))
.in(Scopes.SINGLETON);
}
}
| 1,639 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
/**
* Type converter for MySQL.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class MySqlTypeConverter extends JdbcTypeConverter {
static final int MAX_BYTE_LENGTH = 65_535;
private static final int MIN_BYTE_LENGTH = 0;
/**
* {@inheritDoc}
*/
@Override
public Type toMetacatType(@Nonnull @NonNull final String type) {
// see: https://dev.mysql.com/doc/connector-j/6.0/en/connector-j-reference-type-conversions.html
final String lowerType = type.toLowerCase();
// Split up the possible type: TYPE[(size, magnitude)] EXTRA
final String[] splitType = this.splitType(lowerType);
switch (splitType[0]) {
case "bit":
return this.toMetacatBitType(splitType);
case "tinyint":
                // TODO: MySQL generally treats this as boolean; should we? Not according to the spreadsheet currently.
return BaseType.TINYINT;
case "bool":
case "boolean":
return BaseType.BOOLEAN;
case "smallint":
return BaseType.SMALLINT;
case "mediumint":
case "int":
case "integer":
return BaseType.INT;
case "bigint":
return BaseType.BIGINT;
case "float": // TODO: MySQL precision is lost
return BaseType.FLOAT;
case "double":
case "double precision":
return BaseType.DOUBLE;
case "decimal":
case "dec":
return this.toMetacatDecimalType(splitType);
case "date":
return BaseType.DATE;
case "time":
return this.toMetacatTimeType(splitType);
case "datetime":
case "timestamp":
return this.toMetacatTimestampType(splitType);
case "char":
return this.toMetacatCharType(splitType);
case "varchar":
return this.toMetacatVarcharType(splitType);
case "binary":
case "tinyblob":
case "blob":
case "mediumblob":
case "longblob":
case "varbinary":
return this.toMetacatVarbinaryType(splitType);
case "tinytext":
case "text":
case "mediumtext":
case "longtext":
return BaseType.STRING;
case "json":
return BaseType.JSON;
case "year":
case "enum":
case "set":
default:
log.info("Encountered {} type. Returning Unknown type.", splitType[0]);
return BaseType.UNKNOWN;
}
}
/**
* {@inheritDoc}
*/
@Override
public String fromMetacatType(@Nonnull @NonNull final Type type) {
switch (type.getTypeSignature().getBase()) {
case ARRAY:
throw new UnsupportedOperationException("MySQL doesn't support array types");
case BIGINT:
return "BIGINT";
case BOOLEAN:
return "BOOLEAN";
case CHAR:
if (!(type instanceof CharType)) {
throw new IllegalArgumentException("Expected char type but was " + type.getClass().getName());
}
final CharType charType = (CharType) type;
final int charLength = charType.getLength();
if (charLength < MIN_BYTE_LENGTH) {
throw new IllegalArgumentException("CHAR type must have a length > 0");
}
                // NOTE: For MySQL the max column size is 65,535 bytes, so technically you can have a table
// of a single char column of this length but that's it. Hard to handle that in here when
// just doing the conversions. It would have to be handled by higher level logic that had the
// entire picture.
if (charLength <= MAX_BYTE_LENGTH) {
return "CHAR(" + charLength + ")";
} else {
return "TEXT";
}
case DATE:
return "DATE";
case DECIMAL:
if (!(type instanceof DecimalType)) {
throw new IllegalArgumentException("Expected decimal type but was " + type.getClass().getName());
}
final DecimalType decimalType = (DecimalType) type;
return "DECIMAL(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
case DOUBLE:
return "DOUBLE";
case FLOAT:
return "FLOAT(24)";
case INT:
return "INT";
case INTERVAL_DAY_TO_SECOND:
throw new UnsupportedOperationException("MySQL doesn't support interval types");
case INTERVAL_YEAR_TO_MONTH:
throw new UnsupportedOperationException("MySQL doesn't support interval types");
case JSON:
return "JSON";
case MAP:
throw new UnsupportedOperationException("MySQL doesn't support map types");
case ROW:
throw new UnsupportedOperationException("MySQL doesn't support row types");
case SMALLINT:
return "SMALLINT";
case STRING:
return "TEXT";
case TIME:
case TIME_WITH_TIME_ZONE:
return "TIME";
case TIMESTAMP:
case TIMESTAMP_WITH_TIME_ZONE:
return "TIMESTAMP";
case TINYINT:
return "TINYINT";
case UNKNOWN:
throw new IllegalArgumentException("Can't map an unknown type");
case VARBINARY:
if (!(type instanceof VarbinaryType)) {
throw new IllegalArgumentException("Expected varbinary type but was " + type.getClass().getName());
}
final VarbinaryType varbinaryType = (VarbinaryType) type;
final int binaryLength = varbinaryType.getLength();
if (binaryLength < MIN_BYTE_LENGTH) {
throw new IllegalArgumentException("VARBINARY type must have a length > 0");
}
                // NOTE: For MySQL the max column size is 65,535 bytes, so technically you can have a table
// of a single varbinary column of this length but that's it. Hard to handle that in here when
// just doing the conversions. It would have to be handled by higher level logic that had the
// entire picture.
if (binaryLength <= MAX_BYTE_LENGTH) {
return "VARBINARY(" + binaryLength + ")";
} else {
return "BLOB";
}
case VARCHAR:
if (!(type instanceof VarcharType)) {
throw new IllegalArgumentException("Expected varchar type but was " + type.getClass().getName());
}
final VarcharType varcharType = (VarcharType) type;
final int varCharLength = varcharType.getLength();
if (varCharLength < MIN_BYTE_LENGTH) {
throw new IllegalArgumentException("VARCHAR type must have a length > 0");
}
                // NOTE: For MySQL the max column size is 65,535 bytes, so technically you can have a table
// of a single varchar column of this length but that's it. Hard to handle that in here when
// just doing the conversions. It would have to be handled by higher level logic that had the
// entire picture.
if (varCharLength <= MAX_BYTE_LENGTH) {
return "VARCHAR(" + varCharLength + ")";
} else {
return "TEXT";
}
default:
throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
}
}
}
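
A minimal sketch (not part of the metacat sources) exercising the converter above in both directions; the expected outputs follow directly from the switch statements in this file.

import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.connector.mysql.MySqlTypeConverter;

/**
 * Minimal sketch: convert a MySQL column type to a Metacat type and back.
 */
public final class MySqlTypeConverterSketch {
    private MySqlTypeConverterSketch() {
    }

    public static void main(final String[] args) {
        final MySqlTypeConverter converter = new MySqlTypeConverter();

        // "varchar(255)" is split into the base type and size and mapped to a Metacat VARCHAR type.
        final Type varchar = converter.toMetacatType("varchar(255)");
        System.out.println(varchar.getTypeSignature());

        // STRING maps to TEXT and INT maps to INT, per fromMetacatType above.
        System.out.println(converter.fromMetacatType(BaseType.STRING)); // TEXT
        System.out.println(converter.fromMetacatType(BaseType.INT));    // INT
    }
}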
| 1,640 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorDatabaseService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorUtils;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
/**
 * MySQL-specific extension of the JdbcConnectorDatabaseService implementation, overriding behavior that differs from the default.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class MySqlConnectorDatabaseService extends JdbcConnectorDatabaseService {
/**
* Constructor.
*
* @param dataSource The datasource to use
* @param exceptionMapper The exception mapper to use
*/
@Inject
public MySqlConnectorDatabaseService(
@Nonnull @NonNull final DataSource dataSource,
@Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
) {
super(dataSource, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
        // Overrides the super class because MySQL uses catalogs instead of schemas when listing database names
final String catalogName = name.getCatalogName();
log.debug("Beginning to list database names for catalog {} for request {}", catalogName, context);
try (
final Connection connection = this.getDataSource().getConnection();
final ResultSet schemas = connection.getMetaData().getCatalogs()
) {
final List<QualifiedName> names = Lists.newArrayList();
while (schemas.next()) {
final String schemaName = schemas.getString("TABLE_CAT").toLowerCase(Locale.ENGLISH);
// skip internal schemas
if (!schemaName.equals("information_schema") && !schemaName.equals("mysql")) {
if (prefix == null) {
names.add(QualifiedName.ofDatabase(name.getCatalogName(), schemaName));
} else if (StringUtils.isNotBlank(prefix.getDatabaseName())
&& schemaName.startsWith(prefix.getDatabaseName())) {
names.add(QualifiedName.ofDatabase(name.getCatalogName(), schemaName));
}
}
}
// Does user want sorting?
if (sort != null) {
// We can only really sort by the database name at this level so ignore SortBy field
final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getDatabaseName);
JdbcConnectorUtils.sort(names, sort, comparator);
}
// Does user want pagination?
final List<QualifiedName> results = JdbcConnectorUtils.paginate(names, pageable);
log.debug("Finished listing database names for catalog {} for request {}", catalogName, context);
return results;
} catch (final SQLException se) {
log.debug("An exception occurred listing database names for catalog {} for request {}",
catalogName, context, se);
throw this.getExceptionMapper().toConnectorException(se, name);
}
}
}
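
A minimal sketch (not part of the metacat sources) of calling the override above. The DataSource and request context are supplied by the caller, the catalog name is illustrative, and sort/pageable are left null since both parameters are @Nullable.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.connector.mysql.MySqlConnectorDatabaseService;
import com.netflix.metacat.connector.mysql.MySqlExceptionMapper;

import javax.sql.DataSource;
import java.util.List;

/**
 * Minimal sketch: list the non-internal MySQL catalogs as Metacat database names.
 */
public final class ListMySqlDatabasesSketch {
    private ListMySqlDatabasesSketch() {
    }

    public static List<QualifiedName> listDatabases(
        final DataSource dataSource,
        final ConnectorRequestContext context
    ) {
        final MySqlConnectorDatabaseService service =
            new MySqlConnectorDatabaseService(dataSource, new MySqlExceptionMapper());
        // No prefix filter, no sorting, no pagination: returns every non-internal schema.
        return service.listNames(context, QualifiedName.ofCatalog("mysql_catalog"), null, null, null);
    }
}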
| 1,641 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorTableService.java
|
package com.netflix.metacat.connector.mysql;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorTableService;
import lombok.extern.slf4j.Slf4j;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
/**
 * MySQL table service implementation.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
public class MySqlConnectorTableService extends JdbcConnectorTableService {
private static final String COL_CREATE_TIME = "create_time";
private static final String COL_UPDATE_TIME = "update_time";
private static final String SQL_GET_AUDIT_INFO
= "select create_time, update_time from information_schema.tables where table_schema=? and table_name=?";
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public MySqlConnectorTableService(
final DataSource dataSource,
final JdbcTypeConverter typeConverter,
final JdbcExceptionMapper exceptionMapper) {
super(dataSource, typeConverter, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
try (
final PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)
) {
statement.setString(1, tableName.getDatabaseName());
statement.setString(2, tableName.getTableName());
try (final ResultSet resultSet = statement.executeQuery()) {
if (resultSet.next()) {
final AuditInfo auditInfo =
AuditInfo.builder().createdDate(resultSet.getDate(COL_CREATE_TIME))
.lastModifiedDate(resultSet.getDate(COL_UPDATE_TIME)).build();
tableInfo.setAudit(auditInfo);
}
}
} catch (final Exception ignored) {
log.info("Ignoring. Error getting the audit info for table {}", tableName);
}
}
}
| 1,642 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Implementation of the ConnectorPlugin interface for MySQL.
*
* @author tgianos
* @since 1.0.0
*/
public class MySqlConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "mysql";
private static final MySqlTypeConverter TYPE_CONVERTER = new MySqlTypeConverter();
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new MySqlConnectorFactory(connectorContext.getCatalogName(),
connectorContext.getCatalogShardName(), connectorContext.getConfiguration());
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
}
| 1,643 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* MySQL connector implementation classes to plugin a MySQL based data store.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.mysql;
| 1,644 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.SpringConnectorFactory;
import com.netflix.metacat.connector.druid.configs.DruidConnectorConfig;
import com.netflix.metacat.connector.druid.configs.DruidHttpClientConfig;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
/**
* Druid Connector Factory.
*
* @author zhenl
* @since 1.2.0
*/
public class DruidConnectorFactory extends SpringConnectorFactory {
/**
* Constructor.
*
     * @param druidConnectorInfoConverter druid info converter
     * @param connectorContext connector context
*/
DruidConnectorFactory(
final DruidConnectorInfoConverter druidConnectorInfoConverter,
final ConnectorContext connectorContext
) {
super(druidConnectorInfoConverter, connectorContext);
super.registerClazz(DruidConnectorConfig.class, DruidHttpClientConfig.class);
super.refresh();
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorDatabaseService getDatabaseService() {
return this.ctx.getBean(DruidConnectorDatabaseService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTableService getTableService() {
return this.ctx.getBean(DruidConnectorTableService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorPartitionService getPartitionService() {
return this.ctx.getBean(DruidConnectorPartitionService.class);
}
}
| 1,645 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorPartitionService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.druid.converter.DataSource;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
import com.netflix.metacat.connector.druid.converter.DruidConverterUtil;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
/**
* Druid implementation of the ConnectorPartitionService.
 * The partition concept is used to model segments in Druid.
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class DruidConnectorPartitionService implements ConnectorPartitionService {
private final MetacatDruidClient druidClient;
private final DruidConnectorInfoConverter druidConnectorInfoConverter;
/**
* Constructor.
*
* @param druidClient druid client
     * @param druidConnectorInfoConverter druid info converter
*/
public DruidConnectorPartitionService(
final MetacatDruidClient druidClient,
final DruidConnectorInfoConverter druidConnectorInfoConverter
) {
this.druidClient = druidClient;
this.druidConnectorInfoConverter = druidConnectorInfoConverter;
}
/**
* {@inheritDoc}
*/
@Override
public int getPartitionCount(
final ConnectorRequestContext context,
final QualifiedName name,
final TableInfo tableInfo
) {
final ObjectNode node = this.druidClient.getAllDataByName(name.getTableName());
return DruidConverterUtil.getSegmentCount(node);
}
/**
* {@inheritDoc}
*/
@Override
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext context,
final QualifiedName name,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
final ObjectNode node = this.druidClient.getAllDataByName(name.getTableName());
final DataSource dataSource = DruidConverterUtil.getDatasourceFromAllSegmentJsonObject(node);
final List<PartitionInfo> partitionInfos = new ArrayList<>();
dataSource.getSegmentList().forEach(
p -> partitionInfos.add(this.druidConnectorInfoConverter.getPartitionInfoFromSegment(p)));
return partitionInfos;
}
}
| 1,646 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConfigConstants.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
/**
* Druid Config Constants.
*
* @author zhenl
* @since 1.2.0
*/
public final class DruidConfigConstants {
/**
* DRUID_COORDINATOR_URI.
*/
public static final String DRUID_COORDINATOR_URI = "druid.uri";
//Http client
/**
* HTTP_TIMEOUT.
*/
public static final String HTTP_TIMEOUT = "http.timeout";
/**
* POOL_SIZE.
*/
public static final String POOL_SIZE = "pool.size";
/**
* DRUID_DB.
*/
public static final String DRUID_DB = "default";
/**
* druid name.
*/
public static final String NAME = "name";
/**
* druid properties.
*/
public static final String PROPERTIES = "properties";
/**
* druid created.
*/
public static final String CREATED = "created";
/**
* druid dimensions.
*/
public static final String DIMENSIONS = "dimensions";
/**
* druid metrics.
*/
public static final String METRICS = "metrics";
/**
* druid segments.
*/
public static final String SEGMENTS = "segments";
/**
* size.
*/
public static final String SIZE = "size";
//Segment related information
/**
* dataSource.
*/
public static final String DATA_SOURCE = "dataSource";
/**
* interval.
*/
public static final String INTERVAL = "interval";
/**
* loadSpec.
*/
public static final String LOADSPEC = "loadSpec";
/**
* bucket.
*/
public static final String LOADSPEC_BUCKET = "bucket";
/**
* key.
*/
public static final String LOADSPEC_KEY = "key";
/**
* loadspec type.
*/
public static final String LOADSPEC_TYPE = "type";
/**
* identifier.
*/
public static final String IDENTIFIER = "identifier";
/**
* default value if empty.
*/
public static final String DEFAULT_VAULE = "NONE";
private DruidConfigConstants() {
}
}
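
A minimal sketch (not part of the metacat sources) of a catalog configuration map built from the connection-related constants above; the URI, timeout, and pool size values are placeholders.

import com.netflix.metacat.connector.druid.DruidConfigConstants;

import java.util.HashMap;
import java.util.Map;

/**
 * Minimal sketch: the configuration keys a Druid catalog would typically supply.
 */
public final class DruidConfigSketch {
    private DruidConfigSketch() {
    }

    public static Map<String, String> exampleConfiguration() {
        final Map<String, String> config = new HashMap<>();
        config.put(DruidConfigConstants.DRUID_COORDINATOR_URI, "http://druid-coordinator.example.com:8081"); // placeholder host
        config.put(DruidConfigConstants.HTTP_TIMEOUT, "5000"); // assumed to be milliseconds
        config.put(DruidConfigConstants.POOL_SIZE, "10");
        return config;
    }
}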
| 1,647 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorPlugin.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
/**
* Druid Connector Plugin.
*
* @author zhenl
* @since 1.2.0
*/
public class DruidConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "druid";
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(final ConnectorContext connectorContext) {
return new DruidConnectorFactory(
new DruidConnectorInfoConverter(connectorContext.getCatalogName()), connectorContext);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return null;
}
}
| 1,648 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorDatabaseService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import javax.annotation.Nullable;
import java.util.List;
/**
* Druid Connector DatabaseService.
 * There is no database concept in Druid.
* @author zhenl
* @since 1.2.0
*/
public class DruidConnectorDatabaseService implements ConnectorDatabaseService {
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
return Lists.newArrayList(QualifiedName.ofDatabase(name.getCatalogName(), DruidConfigConstants.DRUID_DB));
}
/**
* {@inheritDoc}.
*/
@Override
public DatabaseInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
return DatabaseInfo.builder()
.name(QualifiedName.ofDatabase(name.getCatalogName(), DruidConfigConstants.DRUID_DB))
.build();
}
}
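
A minimal sketch (not part of the metacat sources) of the service above: listNames always yields the single logical "default" database, since Druid itself has no database concept. The catalog name is illustrative and the request context is supplied by the caller.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.connector.druid.DruidConnectorDatabaseService;

import java.util.List;

/**
 * Minimal sketch: the Druid connector exposes exactly one database name per catalog.
 */
public final class DruidDatabaseSketch {
    private DruidDatabaseSketch() {
    }

    public static List<QualifiedName> listDruidDatabases(final ConnectorRequestContext context) {
        final DruidConnectorDatabaseService service = new DruidConnectorDatabaseService();
        // Returns a single entry: <catalog>/default
        return service.listNames(context, QualifiedName.ofCatalog("druid_catalog"), null, null, null);
    }
}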
| 1,649 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/MetacatDruidClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.List;
/**
* Druid Client.
*
* @author zhenl
* @since 1.2.0
*/
public interface MetacatDruidClient {
/**
* Standard error message for all default implementations.
*/
String UNSUPPORTED_MESSAGE = "Not supported for metacat druid client";
/**
* Get all data sources.
*
* @return data source names
*/
default List<String> getAllDataSources() {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Returns all segment data of data source.
*
* @param dataSourceName dataSourceName
* @return data source raw data
*/
default ObjectNode getAllDataByName(final String dataSourceName) {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Returns the latest segment of data source.
*
* @param dataSourceName dataSourceName
* @return data source raw data
*/
default ObjectNode getLatestDataByName(final String dataSourceName) {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
}
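// Illustrative usage sketch (not part of the original source): how a caller might consume a
// MetacatDruidClient implementation. The class and method names below are hypothetical; only
// the interface methods declared above are assumed to exist.
class MetacatDruidClientUsageExample {
    static void printLatestSegments(final MetacatDruidClient client) {
        // List every data source known to the coordinator.
        for (final String dataSource : client.getAllDataSources()) {
            // Fetch the raw JSON of the most recent segment for each data source.
            final ObjectNode latest = client.getLatestDataByName(dataSource);
            System.out.println(dataSource + " -> " + latest);
        }
    }
}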
| 1,650 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,651 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorTableService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetadataException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.druid.converter.DataSource;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
import com.netflix.metacat.connector.druid.converter.DruidConverterUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.web.client.HttpClientErrorException;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Comparator;
import java.util.List;
/**
* Druid Connector Table Service, which manages druid data sources.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class DruidConnectorTableService implements ConnectorTableService {
private final MetacatDruidClient druidClient;
private final DruidConnectorInfoConverter druidConnectorInfoConverter;
/**
* Constructor.
*
* @param druidClient druid client
* @param druidConnectorInfoConverter druid info object converter
*/
public DruidConnectorTableService(
final MetacatDruidClient druidClient,
final DruidConnectorInfoConverter druidConnectorInfoConverter
) {
this.druidClient = druidClient;
this.druidConnectorInfoConverter = druidConnectorInfoConverter;
}
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Get table metadata for qualified name {} for request {}", name, context);
try {
final ObjectNode node = this.druidClient.getLatestDataByName(name.getTableName());
final DataSource dataSource = DruidConverterUtil.getDatasourceFromLatestSegmentJsonObject(node);
return this.druidConnectorInfoConverter.getTableInfoFromDatasource(dataSource);
} catch (MetacatException e) {
log.error(String.format("Table %s not found.", name), e);
throw new TableNotFoundException(name);
} catch (HttpClientErrorException e) {
log.error(String.format("Failed getting table %s.", name), e);
if (HttpStatus.NOT_FOUND.equals(e.getStatusCode())) {
throw new TableNotFoundException(name);
} else {
throw new InvalidMetadataException(String.format("Invalid table %s. %s", name, e.getMessage()));
}
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : null;
for (String tableName : this.druidClient.getAllDataSources()) {
final QualifiedName qualifiedName =
QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
if (tableFilter == null || tableName.startsWith(tableFilter)) {
qualifiedNames.add(qualifiedName);
}
}
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (Exception exception) {
throw new ConnectorException(String.format("Failed to list names for druid table %s", name), exception);
}
}
}
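// Illustrative sketch (not part of the original source): listNames treats a table-definition
// prefix as a startsWith filter on Druid data source names. The catalog and database names
// below are hypothetical.
class DruidConnectorTableServiceUsageExample {
    static List<QualifiedName> listClickDataSources(final DruidConnectorTableService service,
                                                    final ConnectorRequestContext context) {
        // Returns only data sources whose name starts with "clicks", unsorted and unpaginated.
        return service.listNames(context,
            QualifiedName.ofDatabase("druid", "default"),
            QualifiedName.ofTable("druid", "default", "clicks"),
            null, null);
    }
}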
| 1,652 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/LoadSpec.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Builder;
import lombok.Data;
import java.util.List;
/**
* Load spec.
* @author zhenl
* @since 1.2.0
*/
@Data
@Builder
public class LoadSpec {
private final String type;
private final String bucket;
private final List<String> keys;
private final String uri;
}
| 1,653 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/Interval.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Data;
import java.time.Instant;
/**
* Interval.
*
* @author zhenl
* @since 1.2.0
*/
@Data
public class Interval implements Comparable<Interval> {
private final Instant start;
private final Instant end;
@Override
public int compareTo(final Interval interval) {
return this.getStart().compareTo(interval.getStart());
}
}
| 1,654 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/Segment.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Data;
/**
* Druid Segment.
*
* @author zhenl
* @since 1.2.0
*/
@Data
public class Segment implements Comparable<Segment> {
private final String name;
private final Interval interval;
private final String dimensions;
private final String metric;
private final LoadSpec loadSpec;
@Override
public int compareTo(final Segment segment) {
return this.getInterval().compareTo(segment.getInterval());
}
}
| 1,655 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/Database.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
/**
* Druid Database.
* There is no equivalent of a database in Druid, so this database is always 'default'.
* @author zhenl
* @since 1.2.0
*/
@Getter
@SuppressFBWarnings
public class Database {
private final String name = DruidConfigConstants.DRUID_DB;
}
| 1,656 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/DruidConverterUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import org.apache.commons.lang3.StringUtils;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* Druid Converter Util.
*
* @author zhenl
* @since 1.2.0
*/
public final class DruidConverterUtil {
private DruidConverterUtil() {
}
/**
* Get the data source from the all-segments JSON object.
*
* @param node object node
* @return data source object
*/
public static DataSource getDatasourceFromAllSegmentJsonObject(final ObjectNode node) {
final Instant createTime = Instant.parse(
node.get(DruidConfigConstants.PROPERTIES)
.get(DruidConfigConstants.CREATED).asText());
final String name = node.get(DruidConfigConstants.NAME).asText();
final List<Segment> segmentList = new ArrayList<>();
for (JsonNode segNode : node.get(DruidConfigConstants.SEGMENTS)) {
final Segment segment = getSegmentFromJsonNode(segNode.deepCopy());
segmentList.add(segment);
}
Collections.sort(segmentList);
return new DataSource(name, createTime, segmentList);
}
/**
* Get the data source from the latest-segment JSON object.
*
* @param node object node
* @return data source object
*/
public static DataSource getDatasourceFromLatestSegmentJsonObject(final ObjectNode node) {
final Segment segment = getSegmentFromJsonNode(node);
return new DataSource(segment.getName(), segment.getInterval().getStart(), Collections.singletonList(segment));
}
/**
* get segment count.
*
* @param node object node
* @return segment count
*/
public static int getSegmentCount(final ObjectNode node) {
return node.get(DruidConfigConstants.SEGMENTS).size();
}
private static String getUriFromKey(final String bucket, final String key) {
return bucket + "/" + key.substring(0, key.lastIndexOf("/"));
}
private static Segment getSegmentFromJsonNode(final ObjectNode node) {
final String name = node.get(DruidConfigConstants.DATA_SOURCE).asText();
final String[] intervalStr = node.get(DruidConfigConstants.INTERVAL).asText().split("/");
final Interval interval = new Interval(Instant.parse(intervalStr[0]),
Instant.parse(intervalStr[1]));
final JsonNode loadspecNode = node.get(DruidConfigConstants.LOADSPEC);
final JsonNode loadspecNodeBucket = loadspecNode.get(DruidConfigConstants.LOADSPEC_BUCKET);
// Checking for Null before accessing the node as bucket and key could be null in load spec
final String bucket = loadspecNodeBucket != null
? loadspecNode.get(DruidConfigConstants.LOADSPEC_BUCKET).asText() : "";
final JsonNode loadspecNodeKey = loadspecNode.get(DruidConfigConstants.LOADSPEC_KEY);
final List<String> keys = loadspecNodeKey != null
? Arrays.asList(loadspecNode.get(DruidConfigConstants.LOADSPEC_KEY).asText().split(","))
: new ArrayList<>();
final LoadSpec loadSpec = new LoadSpec(loadspecNode.get(DruidConfigConstants.LOADSPEC_TYPE).asText(),
bucket, keys, StringUtils.isEmpty(bucket) || keys.size() == 0
? "" : getUriFromKey(bucket, keys.get(0))
);
final String dimensions = node.get(DruidConfigConstants.DIMENSIONS).asText();
final String metric = node.get(DruidConfigConstants.METRICS).asText();
return new Segment(name, interval, dimensions, metric, loadSpec);
}
}
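// Illustrative sketch (not part of the original source) of the segment JSON this utility expects:
// dataSource, interval ("start/end"), loadSpec {type, bucket, key}, plus comma-separated
// dimensions and metrics. The literal field names below are assumptions for illustration; the
// real keys come from DruidConfigConstants.
class DruidConverterUtilExample {
    static DataSource parseSingleSegment() throws java.io.IOException {
        final String json = "{"
            + "\"dataSource\":\"clicks\","
            + "\"interval\":\"2017-01-01T00:00:00.000Z/2017-01-02T00:00:00.000Z\","
            + "\"loadSpec\":{\"type\":\"s3_zip\",\"bucket\":\"my-bucket\",\"key\":\"clicks/v1/index.zip\"},"
            + "\"dimensions\":\"country,device\","
            + "\"metrics\":\"count\"}";
        final ObjectNode node =
            (ObjectNode) new com.fasterxml.jackson.databind.ObjectMapper().readTree(json);
        // Wraps the single parsed segment in a DataSource whose create time is the interval start.
        return DruidConverterUtil.getDatasourceFromLatestSegmentJsonObject(node);
    }
}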
| 1,657 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/DruidConnectorInfoConverter.java
|
package com.netflix.metacat.connector.druid.converter;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Druid Info Converter.
*
* @author zhenl
* @since 1.2.0
*/
public class DruidConnectorInfoConverter implements ConnectorInfoConverter<Database, Database, Segment> {
private final String catalogName;
/**
* Constructor.
*
* @param catalogName catalog Name
*/
public DruidConnectorInfoConverter(final String catalogName) {
this.catalogName = catalogName;
}
/**
* Convert from segment to partition info.
*
* @param segment segment object
* @return partition info object
*/
public PartitionInfo getPartitionInfoFromSegment(final Segment segment) {
final Map<String, String> metadata = new HashMap<>();
metadata.put(DruidConfigConstants.LOADSPEC_KEY, segment.getLoadSpec().getKeys().toString());
metadata.put(DruidConfigConstants.LOADSPEC_BUCKET, segment.getLoadSpec().getBucket());
metadata.put(DruidConfigConstants.LOADSPEC_TYPE, segment.getLoadSpec().getType());
metadata.put(DruidConfigConstants.DIMENSIONS, segment.getDimensions());
metadata.put(DruidConfigConstants.METRICS, segment.getMetric());
final StorageInfo storageInfo = StorageInfo.builder().uri(segment.getLoadSpec().getUri()).build();
return PartitionInfo.builder().metadata(metadata).serde(storageInfo).build();
}
/**
* Convert from data source to table info.
*
* @param dataSource dataSource object
* @return table info object
*/
public TableInfo getTableInfoFromDatasource(final DataSource dataSource) {
final List<Segment> segmentList = dataSource.getSegmentList();
final Segment latestSegment = segmentList.get(segmentList.size() - 1);
final List<FieldInfo> fieldInfos = new ArrayList<>();
for (String dim : latestSegment.getDimensions().split(",")) {
fieldInfos.add(FieldInfo.builder()
.comment(DruidConfigConstants.DIMENSIONS)
.name(dim)
.type(BaseType.STRING)
.build());
}
for (String dim : latestSegment.getMetric().split(",")) {
fieldInfos.add(FieldInfo.builder()
.comment(DruidConfigConstants.METRICS)
.name(dim)
.type(BaseType.DOUBLE)
.build());
}
return TableInfo.builder().fields(fieldInfos)
.name(QualifiedName.ofTable(catalogName, DruidConfigConstants.DRUID_DB, dataSource.getName()))
.build();
}
}
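// Illustrative sketch (not part of the original source): getTableInfoFromDatasource splits the
// latest segment's comma-separated dimension and metric strings into fields, typing dimensions
// as STRING and metrics as DOUBLE. The catalog name and data source below are hypothetical.
class DruidConnectorInfoConverterExample {
    static TableInfo describe(final DataSource clicks) {
        // For a latest segment with dimensions "country,device" and metrics "count", the result
        // has three fields: country (STRING), device (STRING) and count (DOUBLE), qualified as
        // druid/<default db>/<data source name>.
        return new DruidConnectorInfoConverter("druid").getTableInfoFromDatasource(clicks);
    }
}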
| 1,658 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector converter.
*
* @author zhenl
* @since 1.2.0
*
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid.converter;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,659 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/DataSource.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Data;
import java.time.Instant;
import java.util.List;
/**
* Druid Datasource.
*
* @author zhenl
* @since 1.2.0
*/
@Data
public class DataSource {
private final String name;
private final Instant createTime;
private final List<Segment> segmentList;
}
| 1,660 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/configs/DruidHttpClientConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.util.TimeUtil;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import com.netflix.metacat.connector.druid.MetacatDruidClient;
import com.netflix.metacat.connector.druid.client.DruidHttpClientImpl;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.web.client.RestTemplate;
import java.net.UnknownHostException;
import java.util.concurrent.TimeUnit;
/**
* DruidHttpClientConfig.
*
* @author zhenl
* @since 1.2.0
*/
@Configuration
public class DruidHttpClientConfig {
/**
* Druid client instance.
*
* @param connectorContext connector context
* @param restTemplate rest template
* @return MetacatDruidClient
* @throws UnknownHostException exception for unknown host
*/
@Bean
public MetacatDruidClient createMetacatDruidClient(
final ConnectorContext connectorContext,
final RestTemplate restTemplate) throws UnknownHostException {
return new DruidHttpClientImpl(connectorContext, restTemplate);
}
/**
* Rest template.
*
* @param connectorContext connector context
* @return RestTemplate
*/
@Bean
public RestTemplate restTemplate(final ConnectorContext connectorContext) {
return new RestTemplate(new HttpComponentsClientHttpRequestFactory(httpClient(connectorContext)));
}
/**
* Http client.
*
* @param connectorContext connector context
* @return HttpClient
*/
@Bean
public HttpClient httpClient(final ConnectorContext connectorContext) {
final int timeout = (int) TimeUtil.toTime(
connectorContext.getConfiguration().getOrDefault(DruidConfigConstants.HTTP_TIMEOUT, "5s"),
TimeUnit.SECONDS,
TimeUnit.MILLISECONDS
);
final int poolsize = Integer.parseInt(connectorContext.getConfiguration()
.getOrDefault(DruidConfigConstants.POOL_SIZE, "10"));
final RequestConfig config = RequestConfig.custom()
.setConnectTimeout(timeout)
.setConnectionRequestTimeout(timeout)
.setSocketTimeout(timeout)
.setMaxRedirects(3)
.build();
final PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setMaxTotal(poolsize);
return HttpClientBuilder
.create()
.setDefaultRequestConfig(config)
.setConnectionManager(connectionManager)
.build();
}
}
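// Illustrative note (not part of the original source): the HttpClient bean honors two optional
// connector configuration entries, DruidConfigConstants.HTTP_TIMEOUT (a duration string such as
// "5s", default 5 seconds, applied to connect/request/socket timeouts) and
// DruidConfigConstants.POOL_SIZE (default 10 pooled connections). A hypothetical manual wiring,
// assuming a ConnectorContext is supplied by the connector framework:
// final DruidHttpClientConfig config = new DruidHttpClientConfig();
// final RestTemplate restTemplate = config.restTemplate(connectorContext);
// final MetacatDruidClient client = config.createMetacatDruidClient(connectorContext, restTemplate);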
| 1,661 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/configs/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid.configs;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,662 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/configs/DruidConnectorConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.configs;
import com.netflix.metacat.connector.druid.DruidConnectorDatabaseService;
import com.netflix.metacat.connector.druid.DruidConnectorPartitionService;
import com.netflix.metacat.connector.druid.DruidConnectorTableService;
import com.netflix.metacat.connector.druid.MetacatDruidClient;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Druid Connector Config.
*
* @author zhenl
* @since 1.2.0
*/
@Configuration
public class DruidConnectorConfig {
/**
* create druid connector table service.
*
* @param druidClient druid Client
* @param druidConnectorInfoConverter druid info converter
* @return druid connector table Service
*/
@Bean
public DruidConnectorTableService druidTableService(
final MetacatDruidClient druidClient,
final DruidConnectorInfoConverter druidConnectorInfoConverter) {
return new DruidConnectorTableService(
druidClient,
druidConnectorInfoConverter
);
}
/**
* create druid connector database service.
*
* @return druid connector database Service
*/
@Bean
public DruidConnectorDatabaseService druidDatabaseService() {
return new DruidConnectorDatabaseService();
}
/**
* create druid connector partition service.
*
* @param druidClient druid Client
* @param druidConnectorInfoConverter druid info converter
* @return druid connector partition Service
*/
@Bean
public DruidConnectorPartitionService druidPartitionService(
final MetacatDruidClient druidClient,
final DruidConnectorInfoConverter druidConnectorInfoConverter) {
return new DruidConnectorPartitionService(druidClient, druidConnectorInfoConverter);
}
}
| 1,663 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/client/DruidHttpClientUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.client;
/**
* DruidHttpClientUtil.
*
* @author zhenl
* @since 1.2.0
*/
public final class DruidHttpClientUtil {
private DruidHttpClientUtil() {
}
/**
* Get the latest segment.
*
* @param input segments string
* @return latest segment id
*/
public static String getLatestSegment(final String input) {
final String[] segments = input.substring(1, input.length() - 1).split(",");
String current = segments[0].trim().replace("\"", "");
for (int i = 1; i < segments.length; i++) {
final String next = segments[i].trim().replace("\"", "");
if (current.compareTo(next) <= 0) {
current = next;
}
}
return current;
}
}
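// Illustrative usage sketch (not part of the original source). The input is the raw JSON array
// string returned by the coordinator's segments endpoint; because segment ids embed their
// interval and version, the lexicographically greatest id is the latest one. The ids below are
// hypothetical.
class DruidHttpClientUtilExample {
    static String latest() {
        final String segments = "[\"clicks_2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z_v1\","
            + " \"clicks_2017-01-02T00:00:00.000Z_2017-01-03T00:00:00.000Z_v1\"]";
        // Returns the second id, since it compares greater than the first.
        return DruidHttpClientUtil.getLatestSegment(segments);
    }
}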
| 1,664 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/client/DruidHttpClientImpl.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.client;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import com.netflix.metacat.connector.druid.MetacatDruidClient;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONArray;
import org.springframework.web.client.RestTemplate;
import javax.annotation.Nullable;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
* DruidHttpClientImpl.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class DruidHttpClientImpl implements MetacatDruidClient {
private String druidURI;
private final RestTemplate restTemplate;
private final MetacatJsonLocator jsonLocator = new MetacatJsonLocator();
/**
* Constructor.
*
* @param connectorContext connector context
* @param restTemplate rest template
*/
public DruidHttpClientImpl(final ConnectorContext connectorContext,
final RestTemplate restTemplate) {
this.restTemplate = restTemplate;
final Map<String, String> config = connectorContext.getConfiguration();
final String coordinatorUri = config.get(DruidConfigConstants.DRUID_COORDINATOR_URI);
if (coordinatorUri == null) {
throw new MetacatException("Druid cluster ending point not provided.");
}
try {
new URI(coordinatorUri);
} catch (URISyntaxException exception) {
throw new MetacatException("Druid ending point invalid");
}
this.druidURI = coordinatorUri;
log.info("druid server uri={}", this.druidURI);
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getAllDataSources() {
final JSONArray arr = new JSONArray(restTemplate.getForObject(druidURI, String.class));
return IntStream.range(0, arr.length()).mapToObj(i -> arr.get(i).toString()).collect(Collectors.toList());
}
/**
* {@inheritDoc}
*/
@Override
@Nullable
public ObjectNode getAllDataByName(final String dataSourceName) {
final String result = restTemplate.getForObject(
druidURI + "/{datasoureName}?full", String.class, dataSourceName);
return jsonLocator.parseJsonObject(result);
}
/**
* {@inheritDoc}
*/
@Override
@Nullable
public ObjectNode getLatestDataByName(final String dataSourceName) {
String url = String.format(druidURI + "/%s/segments", dataSourceName);
String result = restTemplate.getForObject(url, String.class);
if (result == null) {
throw new MetacatException(String.format("Druid cluster: %s result not found.", dataSourceName));
}
final String latestSegment = DruidHttpClientUtil.getLatestSegment(result);
log.debug("Get the latest segment {}", latestSegment);
url = String.format(druidURI + "/%s/segments/%s", dataSourceName, latestSegment);
result = restTemplate.getForObject(url, String.class);
return jsonLocator.parseJsonObject(result);
}
}
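// Illustrative note (not part of the original source): the client drives the coordinator
// endpoints off the configured DruidConfigConstants.DRUID_COORDINATOR_URI value:
// GET <coordinatorUri>                               -> JSON array of data source names
// GET <coordinatorUri>/<dataSource>?full             -> JSON object with all segment data
// GET <coordinatorUri>/<dataSource>/segments         -> JSON array of segment ids
// GET <coordinatorUri>/<dataSource>/segments/<id>    -> JSON object for a single segment
// getLatestDataByName chains the last two calls, picking the id via DruidHttpClientUtil.getLatestSegment.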
| 1,665 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/client/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector client.
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid.client;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,666 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorFactory.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.persist.PersistService;
import com.google.inject.persist.jpa.JpaPersistModule;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.util.DataSourceManager;
import java.util.Map;
/**
* s3 connector factory.
*/
public class S3ConnectorFactory implements ConnectorFactory {
private final String catalogName;
private final String catalogShardName;
private final Map<String, String> configuration;
private final S3ConnectorInfoConverter infoConverter;
private ConnectorDatabaseService databaseService;
private ConnectorTableService tableService;
private ConnectorPartitionService partitionService;
private PersistService persistService;
/**
* Constructor.
* @param catalogName catalog name.
* @param catalogShardName catalog shard name
* @param configuration configuration properties
* @param infoConverter S3 info converter
*/
public S3ConnectorFactory(final String catalogName, final String catalogShardName,
final Map<String, String> configuration,
final S3ConnectorInfoConverter infoConverter) {
Preconditions.checkNotNull(catalogName, "Catalog name is null");
Preconditions.checkNotNull(catalogShardName, "Catalog shard name is null");
Preconditions.checkNotNull(configuration, "Catalog connector configuration is null");
this.catalogName = catalogName;
this.catalogShardName = catalogShardName;
this.configuration = configuration;
this.infoConverter = infoConverter;
init();
}
private void init() {
//JPA module
final Map<String, Object> props = Maps.newHashMap(configuration);
props.put("hibernate.connection.datasource",
DataSourceManager.get().load(catalogShardName, configuration).get(catalogShardName));
final Module jpaModule = new JpaPersistModule("s3").properties(props);
final Module s3Module = new S3Module(catalogName, configuration, infoConverter);
final Injector injector = Guice.createInjector(jpaModule, s3Module);
persistService = injector.getInstance(PersistService.class);
persistService.start();
this.databaseService = injector.getInstance(ConnectorDatabaseService.class);
this.tableService = injector.getInstance(ConnectorTableService.class);
this.partitionService = injector.getInstance(ConnectorPartitionService.class);
}
@Override
public ConnectorDatabaseService getDatabaseService() {
return databaseService;
}
@Override
public ConnectorTableService getTableService() {
return tableService;
}
@Override
public ConnectorPartitionService getPartitionService() {
return partitionService;
}
@Override
public String getCatalogName() {
return catalogName;
}
@Override
public String getCatalogShardName() {
return catalogShardName;
}
@Override
public void stop() {
persistService.stop();
}
}
| 1,667 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3Module.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3;
import com.google.inject.Binder;
import com.google.inject.Module;
import com.google.inject.Scopes;
import com.google.inject.name.Names;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.dao.FieldDao;
import com.netflix.metacat.connector.s3.dao.PartitionDao;
import com.netflix.metacat.connector.s3.dao.SourceDao;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.dao.impl.DatabaseDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.FieldDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.PartitionDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.SourceDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.TableDaoImpl;
import java.util.Map;
/**
* Guice module.
*/
public class S3Module implements Module {
private final String catalogName;
private final Map<String, String> configuration;
private final S3ConnectorInfoConverter infoConverter;
/**
* Constructor.
* @param catalogName catalog name.
* @param configuration configuration properties
* @param infoConverter S3 info converter
*/
public S3Module(final String catalogName, final Map<String, String> configuration,
final S3ConnectorInfoConverter infoConverter) {
this.catalogName = catalogName;
this.configuration = configuration;
this.infoConverter = infoConverter;
}
@Override
public void configure(final Binder binder) {
binder.bind(String.class).annotatedWith(Names.named("catalogName")).toInstance(catalogName);
binder.bind(ConnectorInfoConverter.class).toInstance(infoConverter);
binder.bind(S3ConnectorInfoConverter.class).toInstance(infoConverter);
binder.bind(ConnectorDatabaseService.class).to(S3ConnectorDatabaseService.class).in(Scopes.SINGLETON);
binder.bind(ConnectorTableService.class).to(S3ConnectorTableService.class).in(Scopes.SINGLETON);
binder.bind(ConnectorPartitionService.class).to(S3ConnectorPartitionService.class).in(Scopes.SINGLETON);
binder.bind(DatabaseDao.class).to(DatabaseDaoImpl.class);
binder.bind(PartitionDao.class).to(PartitionDaoImpl.class);
binder.bind(SourceDao.class).to(SourceDaoImpl.class);
binder.bind(TableDao.class).to(TableDaoImpl.class);
binder.bind(FieldDao.class).to(FieldDaoImpl.class);
}
}
| 1,668 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorTableService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.inject.persist.Transactional;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.dao.FieldDao;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.model.Database;
import com.netflix.metacat.connector.s3.model.Field;
import com.netflix.metacat.connector.s3.model.Info;
import com.netflix.metacat.connector.s3.model.Location;
import com.netflix.metacat.connector.s3.model.Schema;
import com.netflix.metacat.connector.s3.model.Table;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* S3 Connector implementation for tables.
*
* @author amajumdar
*/
@Transactional
@Slf4j
public class S3ConnectorTableService implements ConnectorTableService {
private final DatabaseDao databaseDao;
private final TableDao tableDao;
private final FieldDao fieldDao;
private final S3ConnectorInfoConverter infoConverter;
private final String catalogName;
/**
* Constructor.
*
* @param catalogName catalog name
* @param databaseDao database DAO impl
* @param tableDao table DAO impl
* @param fieldDao field DAO impl
* @param infoConverter Converter for the S3 resources
*/
@Inject
public S3ConnectorTableService(@Named("catalogName") final String catalogName,
final DatabaseDao databaseDao,
final TableDao tableDao,
final FieldDao fieldDao,
final S3ConnectorInfoConverter infoConverter) {
this.catalogName = catalogName;
this.databaseDao = databaseDao;
this.tableDao = tableDao;
this.fieldDao = fieldDao;
this.infoConverter = infoConverter;
}
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
log.debug("Start: Create table {}", tableInfo.getName());
Preconditions.checkArgument(tableInfo.getSerde() == null
|| !Strings.isNullOrEmpty(tableInfo.getSerde().getOwner()), "Table owner is null or empty");
final QualifiedName tableName = tableInfo.getName();
if (tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(),
tableName.getTableName()) != null) {
throw new TableAlreadyExistsException(tableName);
}
final Database database = databaseDao
.getBySourceDatabaseName(catalogName, tableName.getDatabaseName());
if (database == null) {
throw new DatabaseNotFoundException(QualifiedName.ofDatabase(catalogName, tableName.getDatabaseName()));
}
tableDao.save(infoConverter.fromTableInfo(database, tableInfo));
log.debug("End: Create table {}", tableInfo.getName());
}
@Override
public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
log.debug("Start: Update table {}", tableInfo.getName());
final QualifiedName tableName = tableInfo.getName();
final Table table = tableDao
.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(), tableName.getTableName());
if (table == null) {
throw new TableNotFoundException(tableName);
}
//we can update the fields, the uri, or the full serde
final Location newLocation = infoConverter.toLocation(tableInfo);
Location location = table.getLocation();
if (location == null) {
location = new Location();
location.setTable(table);
table.setLocation(location);
}
if (newLocation.getUri() != null) {
location.setUri(newLocation.getUri());
}
final Info newInfo = newLocation.getInfo();
if (newInfo != null) {
final Info info = location.getInfo();
if (info == null) {
location.setInfo(newInfo);
newInfo.setLocation(location);
} else {
if (newInfo.getInputFormat() != null) {
info.setInputFormat(newInfo.getInputFormat());
}
if (newInfo.getOutputFormat() != null) {
info.setOutputFormat(newInfo.getOutputFormat());
}
if (newInfo.getOwner() != null) {
info.setOwner(newInfo.getOwner());
}
if (newInfo.getSerializationLib() != null) {
info.setSerializationLib(newInfo.getSerializationLib());
}
if (newInfo.getParameters() != null && !newInfo.getParameters().isEmpty()) {
info.setParameters(newInfo.getParameters());
}
}
}
final Schema newSchema = newLocation.getSchema();
if (newSchema != null) {
final List<Field> newFields = newSchema.getFields();
if (newFields != null && !newFields.isEmpty()) {
final Schema schema = location.getSchema();
if (schema == null) {
location.setSchema(newSchema);
newSchema.setLocation(location);
} else {
final List<Field> fields = schema.getFields();
if (fields.isEmpty()) {
newFields.forEach(field -> {
field.setSchema(schema);
fields.add(field);
});
} else {
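// Replace the existing fields wholesale: re-position and re-parent the incoming fields,
// delete the old field rows, flush the table, then attach the new field list.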
for (int i = 0; i < newFields.size(); i++) {
final Field newField = newFields.get(i);
newField.setPos(i);
newField.setSchema(schema);
if (newField.getType() == null) {
newField.setType(newField.getSourceType());
}
}
schema.setFields(null);
fieldDao.delete(fields);
tableDao.save(table, true);
schema.setFields(newFields);
}
}
}
}
log.debug("End: Update table {}", tableInfo.getName());
}
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Start: Delete table {}", name);
final Table table = tableDao.getBySourceDatabaseTableName(catalogName,
name.getDatabaseName(), name.getTableName());
if (table == null) {
throw new TableNotFoundException(name);
}
tableDao.delete(table);
log.debug("End: Delete table {}", name);
}
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final Table table = tableDao.getBySourceDatabaseTableName(catalogName,
name.getDatabaseName(), name.getTableName());
if (table == null) {
throw new TableNotFoundException(name);
}
log.debug("Get table {}", name);
return infoConverter.toTableInfo(name, table);
}
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
return tableDao.getBySourceDatabaseTableName(catalogName, name.getDatabaseName(), name.getTableName()) != null;
}
@Override
public List<TableInfo> list(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
log.debug("List tables for database {} with table name prefix {}", name, prefix);
return tableDao.searchBySourceDatabaseTableName(catalogName, name.getDatabaseName(),
prefix == null ? null : prefix.getTableName(), sort, pageable).stream()
.map(t -> infoConverter.toTableInfo(QualifiedName.ofTable(catalogName, name.getDatabaseName(), t.getName()),
t)).collect(Collectors.toList());
}
@Override
public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
log.debug("List table names for database {} with table name prefix {}", name, prefix);
return tableDao.searchBySourceDatabaseTableName(catalogName, name.getDatabaseName(),
prefix == null ? null : prefix.getTableName(), sort, pageable).stream()
.map(t -> QualifiedName.ofTable(catalogName, name.getDatabaseName(), t.getName()))
.collect(Collectors.toList());
}
@Override
public void rename(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName) {
log.debug("Start: Rename table {} with {}", oldName, newName);
final Table oldTable = tableDao.getBySourceDatabaseTableName(catalogName,
oldName.getDatabaseName(), oldName.getTableName());
if (oldTable == null) {
throw new TableNotFoundException(oldName);
}
final Table newTable = tableDao.getBySourceDatabaseTableName(catalogName,
newName.getDatabaseName(), newName.getTableName());
if (newTable == null) {
oldTable.setName(newName.getTableName());
tableDao.save(oldTable);
} else {
throw new TableAlreadyExistsException(newName);
}
log.debug("End: Rename table {} with {}", oldName, newName);
}
@Override
public Map<String, List<QualifiedName>> getTableNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
return tableDao.getByUris(catalogName, uris, prefixSearch);
}
}
| 1,669 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorInfoConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeManager;
import com.netflix.metacat.common.type.TypeSignature;
import com.netflix.metacat.connector.pig.converters.PigTypeConverter;
import com.netflix.metacat.connector.s3.model.Database;
import com.netflix.metacat.connector.s3.model.Field;
import com.netflix.metacat.connector.s3.model.Info;
import com.netflix.metacat.connector.s3.model.Location;
import com.netflix.metacat.connector.s3.model.Partition;
import com.netflix.metacat.connector.s3.model.Schema;
import com.netflix.metacat.connector.s3.model.Source;
import com.netflix.metacat.connector.s3.model.Table;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Converts S3 model objects to Metacat DTOs and vice versa.
* @author amajumdar
*/
public class S3ConnectorInfoConverter implements ConnectorInfoConverter<Database, Table, Partition> {
private final PigTypeConverter pigTypeConverter;
private final boolean isUsePigTypes;
private final TypeManager typeManager;
/**
* Constructor.
* @param pigTypeConverter Type converter for PIG
* @param isUsePigTypes true, if we need to use pig type converter
* @param typeManager Type manager
*/
public S3ConnectorInfoConverter(final PigTypeConverter pigTypeConverter, final boolean isUsePigTypes,
final TypeManager typeManager) {
this.pigTypeConverter = pigTypeConverter;
this.isUsePigTypes = isUsePigTypes;
this.typeManager = typeManager;
}
@Override
public DatabaseInfo toDatabaseInfo(final QualifiedName catalogName, final Database database) {
final AuditInfo auditInfo = AuditInfo.builder().createdDate(database.getCreatedDate())
.lastModifiedDate(database.getLastUpdatedDate()).build();
return DatabaseInfo.builder().name(QualifiedName.ofDatabase(catalogName.getCatalogName(), database.getName()))
.auditInfo(auditInfo).build();
}
@Override
public Database fromDatabaseInfo(final DatabaseInfo databaseInfo) {
final Database result = new Database();
final QualifiedName databaseName = databaseInfo.getName();
result.setName(databaseName.getDatabaseName());
final Source source = new Source();
source.setName(databaseName.getCatalogName());
result.setSource(source);
return result;
}
@Override
public TableInfo toTableInfo(final QualifiedName tableName, final Table table) {
return TableInfo.builder().name(tableName).fields(toFields(table)).auditInfo(toAuditInfo(table))
.serde(toStorageInfo(table)).build();
}
private List<FieldInfo> toFields(final Table table) {
List<FieldInfo> result = Lists.newArrayList();
final Location location = table.getLocation();
if (location != null) {
final Schema schema = location.getSchema();
if (schema != null) {
result = schema.getFields().stream().sorted(Comparator.comparing(Field::getPos))
.map(this::toFieldInfo).collect(Collectors.toList());
}
}
return result;
}
@Override
public Table fromTableInfo(final TableInfo tableInfo) {
final Table result = new Table();
result.setName(tableInfo.getName().getTableName());
final Location location = toLocation(tableInfo);
if (location != null) {
result.setLocation(location);
location.setTable(result);
}
return result;
}
/**
* Creates the s3 table.
* @param database s3 database
* @param tableInfo table info
* @return s3 table
*/
public Table fromTableInfo(final Database database, final TableInfo tableInfo) {
final Table result = fromTableInfo(tableInfo);
result.setDatabase(database);
return result;
}
@Override
public PartitionInfo toPartitionInfo(final TableInfo tableInfo, final Partition partition) {
final QualifiedName tableName = tableInfo.getName();
final StorageInfo storageInfo = tableInfo.getSerde();
storageInfo.setUri(partition.getUri());
final AuditInfo auditInfo = AuditInfo.builder().createdDate(partition.getCreatedDate())
.lastModifiedDate(partition.getLastUpdatedDate())
.build();
final AuditInfo tableAuditInfo = tableInfo.getAudit();
if (tableAuditInfo != null) {
auditInfo.setCreatedBy(tableAuditInfo.getCreatedBy());
auditInfo.setLastModifiedBy(tableAuditInfo.getLastModifiedBy());
}
return PartitionInfo.builder()
.name(QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(), tableName.getTableName(), partition.getName()))
.serde(storageInfo)
.auditInfo(auditInfo)
.build();
}
@Override
public Partition fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partitionInfo) {
return fromPartitionInfo(partitionInfo);
}
/**
* Converts from partition info to s3 partition object.
* @param partitionInfo partition info
* @return s3 partition
*/
Partition fromPartitionInfo(final PartitionInfo partitionInfo) {
final Partition result = new Partition();
result.setName(partitionInfo.getName().getPartitionName());
result.setUri(partitionInfo.getSerde().getUri());
final AuditInfo auditInfo = partitionInfo.getAudit();
if (auditInfo != null) {
result.setCreatedDate(auditInfo.getCreatedDate());
result.setLastUpdatedDate(auditInfo.getLastModifiedDate());
}
return result;
}
/**
* Returns a partition info.
* @param tableName table name
* @param table s3 table
* @param partition partition
* @return partition info
*/
PartitionInfo toPartitionInfo(final QualifiedName tableName, final Table table, final Partition partition) {
final StorageInfo storageInfo = toStorageInfo(table);
storageInfo.setUri(partition.getUri());
final AuditInfo auditInfo = AuditInfo.builder().createdDate(partition.getCreatedDate())
.lastModifiedDate(partition.getLastUpdatedDate())
.build();
final AuditInfo tableAuditInfo = toAuditInfo(table);
if (tableAuditInfo != null) {
auditInfo.setCreatedBy(tableAuditInfo.getCreatedBy());
auditInfo.setLastModifiedBy(tableAuditInfo.getLastModifiedBy());
}
return PartitionInfo.builder()
.name(QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(), tableName.getTableName(), partition.getName()))
.serde(storageInfo)
.auditInfo(auditInfo)
.build();
}
/**
     * Converts from the s3 table to storage info.
     * @param table s3 table
     * @return storage info
*/
StorageInfo toStorageInfo(final Table table) {
StorageInfo result = null;
final Location location = table.getLocation();
if (location != null) {
final Map<String, String> infoParameters = Maps.newHashMap();
result = new StorageInfo();
result.setUri(location.getUri());
final Info info = location.getInfo();
if (info != null) {
result.setOwner(info.getOwner());
result.setInputFormat(info.getInputFormat());
result.setOutputFormat(info.getOutputFormat());
result.setSerializationLib(info.getSerializationLib());
if (info.getParameters() != null) {
infoParameters.putAll(info.getParameters());
}
}
result.setSerdeInfoParameters(infoParameters);
result.setParameters(Maps.newHashMap());
}
return result;
}
/**
* Gets the owner for the given table.
     * @param table s3 table
* @return owner name
*/
public String getOwner(final Table table) {
String result = null;
final Location location = table.getLocation();
if (location != null) {
final Info info = location.getInfo();
if (info != null) {
result = info.getOwner();
}
}
return result;
}
/**
* Converts from storage info to s3 location.
* @param storageInfo storage info
* @return location
*/
Location fromStorageInfo(final StorageInfo storageInfo) {
final Location result = new Location();
if (storageInfo != null) {
result.setUri(storageInfo.getUri());
final Info info = new Info();
info.setLocation(result);
info.setOwner(storageInfo.getOwner());
info.setInputFormat(storageInfo.getInputFormat());
info.setOutputFormat(storageInfo.getOutputFormat());
info.setSerializationLib(storageInfo.getSerializationLib());
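        // The s3 model keeps a single parameter map, so table parameters and serde parameters
        // are merged into one map on the Info object.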
final Map<String, String> parameters = Maps.newHashMap();
if (storageInfo.getParameters() != null) {
parameters.putAll(storageInfo.getParameters());
}
if (storageInfo.getSerdeInfoParameters() != null) {
parameters.putAll(storageInfo.getSerdeInfoParameters());
}
info.setParameters(parameters);
result.setInfo(info);
}
return result;
}
/**
* Creates list of fields from table info.
* @param tableInfo table info
* @param schema schema
* @return list of fields
*/
public List<Field> toFields(final TableInfo tableInfo, final Schema schema) {
final ImmutableList.Builder<Field> columns = ImmutableList.builder();
int index = 0;
for (FieldInfo fieldInfo : tableInfo.getFields()) {
final Field field = toField(fieldInfo);
field.setPos(index++);
field.setSchema(schema);
columns.add(field);
}
return columns.build();
}
/**
* Converts from column metadata to field.
* @param fieldInfo column
* @return field
*/
public Field toField(final FieldInfo fieldInfo) {
final Field result = new Field();
result.setName(fieldInfo.getName());
result.setPartitionKey(fieldInfo.isPartitionKey());
result.setComment(fieldInfo.getComment());
result.setSourceType(fieldInfo.getSourceType());
result.setType(toTypeString(fieldInfo.getType()));
return result;
}
/**
     * Converts from field to column metadata.
     * @param field field
* @return field info
*/
public FieldInfo toFieldInfo(final Field field) {
return FieldInfo.builder().name(field.getName()).partitionKey(field.isPartitionKey())
.comment(field.getComment()).sourceType(field.getSourceType()).type(toType(field.getType())).build();
}
private String toTypeString(final Type type) {
String result = null;
if (isUsePigTypes) {
result = pigTypeConverter.fromMetacatType(type);
} else {
result = type.getDisplayName();
}
return result;
}
/**
* Converts from type string to Metacat type.
* @param type type
* @return Type
*/
public Type toType(final String type) {
Type result = null;
if (isUsePigTypes) {
//Hack for now. We need to correct the type format in Franklin
String typeString = type;
if ("map".equals(type)) {
typeString = "map[]";
}
result = pigTypeConverter.toMetacatType(typeString);
} else {
result = typeManager.getType(TypeSignature.parseTypeSignature(type));
}
return result;
}
/**
     * Creates audit info from the s3 table.
     * @param table s3 table
* @return audit info
*/
public AuditInfo toAuditInfo(final Table table) {
final AuditInfo result = AuditInfo.builder().createdDate(table.getCreatedDate())
.lastModifiedDate(table.getLastUpdatedDate()).build();
final Location location = table.getLocation();
if (location != null) {
final Info info = location.getInfo();
if (info != null) {
result.setCreatedBy(info.getOwner());
result.setLastModifiedBy(info.getOwner());
}
}
return result;
}
/**
* Creates location.
* @param tableInfo table info
* @return location
*/
public Location toLocation(final TableInfo tableInfo) {
final Location location = fromStorageInfo(tableInfo.getSerde());
final Schema schema = new Schema();
schema.setLocation(location);
schema.setFields(toFields(tableInfo, schema));
location.setSchema(schema);
return location;
}
/**
* Creates s3 partition.
* @param table table
* @param partitionInfo partition info
* @return partition
*/
public Partition toPartition(final Table table, final PartitionInfo partitionInfo) {
final Partition result = fromPartitionInfo(partitionInfo);
result.setTable(table);
return result;
}
/**
* Gets the partition uri.
* @param partitionInfo partition
* @return uri
*/
public String getUri(final PartitionInfo partitionInfo) {
return partitionInfo.getSerde() == null ? null : partitionInfo.getSerde().getUri();
}
/**
* Gets the partition keys for the given table.
* @param table table info
* @return list of keys
*/
public List<String> partitionKeys(final Table table) {
List<String> result = Lists.newArrayList();
if (table.getLocation() != null) {
final Schema schema = table.getLocation().getSchema();
if (schema != null) {
final List<Field> fields = schema.getFields();
result = fields.stream().filter(Field::isPartitionKey).map(Field::getName).collect(Collectors.toList());
}
}
return result;
}
}
| 1,670 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.inject.persist.Transactional;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.dao.SourceDao;
import com.netflix.metacat.connector.s3.model.Database;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import java.util.List;
import java.util.stream.Collectors;
/**
* S3 Connector Database Service implementation.
*
* @author amajumdar
*/
@Transactional
@Slf4j
public class S3ConnectorDatabaseService implements ConnectorDatabaseService {
private final SourceDao sourceDao;
private final DatabaseDao databaseDao;
private final S3ConnectorInfoConverter infoConverter;
private final String catalogName;
/**
* Constructor.
*
* @param catalogName catalog name
* @param databaseDao database DAO impl
* @param sourceDao catalog/source DAO impl
* @param infoConverter Converter for the S3 resources
*/
@Inject
public S3ConnectorDatabaseService(@Named("catalogName") final String catalogName, final DatabaseDao databaseDao,
final SourceDao sourceDao, final S3ConnectorInfoConverter infoConverter) {
this.databaseDao = databaseDao;
this.sourceDao = sourceDao;
this.infoConverter = infoConverter;
this.catalogName = catalogName;
}
@Override
public List<QualifiedName> listViewNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName databaseName) {
return Lists.newArrayList();
}
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo databaseInfo) {
final String databaseName = databaseInfo.getName().getDatabaseName();
log.debug("Start: Create database {}", databaseInfo.getName());
Preconditions.checkNotNull(databaseName, "Database name is null");
if (databaseDao.getBySourceDatabaseName(catalogName, databaseName) != null) {
log.warn("Database {} already exists", databaseName);
throw new DatabaseAlreadyExistsException(databaseInfo.getName());
}
final Database database = new Database();
database.setName(databaseName);
database.setSource(sourceDao.getByName(catalogName));
databaseDao.save(database);
log.debug("End: Create database {}", databaseInfo.getName());
}
@Override
public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo databaseInfo) {
// no op
}
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Start: Delete database {}", name);
final String databaseName = name.getDatabaseName();
Preconditions.checkNotNull(databaseName, "Database name is null");
final Database database = databaseDao.getBySourceDatabaseName(catalogName, databaseName);
if (database == null) {
throw new DatabaseNotFoundException(name);
} else if (database.getTables() != null && !database.getTables().isEmpty()) {
throw new ConnectorException("Database " + databaseName + " is not empty. One or more tables exist.", null);
}
databaseDao.delete(database);
log.debug("End: Delete database {}", name);
}
@Override
public DatabaseInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final String databaseName = name.getDatabaseName();
Preconditions.checkNotNull(databaseName, "Database name is null");
log.debug("Get database {}", name);
final Database database = databaseDao.getBySourceDatabaseName(catalogName, databaseName);
if (database == null) {
throw new DatabaseNotFoundException(name);
}
return infoConverter.toDatabaseInfo(QualifiedName.ofCatalog(catalogName), database);
}
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
return databaseDao.getBySourceDatabaseName(catalogName, name.getDatabaseName()) != null;
}
@Override
public List<DatabaseInfo> list(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
log.debug("List databases for catalog {} and database with prefix {}", name, prefix);
return databaseDao.searchBySourceDatabaseName(catalogName, prefix == null ? "" : prefix.getTableName(),
sort, pageable).stream().map(d -> infoConverter.toDatabaseInfo(name, d)).collect(
Collectors.toList());
}
@Override
public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
log.debug("List database names for catalog {} and database with prefix {}", name, prefix);
return databaseDao.searchBySourceDatabaseName(catalogName, prefix == null ? "" : prefix.getTableName(),
sort, pageable).stream().map(d -> QualifiedName.ofDatabase(catalogName, d.getName())).collect(
Collectors.toList());
}
@Override
public void rename(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName) {
log.debug("Start: Rename database {} with {}", oldName, newName);
final String newDatabaseName = newName.getDatabaseName();
Preconditions.checkNotNull(newDatabaseName, "Database name is null");
final Database oldDatabase = databaseDao.getBySourceDatabaseName(catalogName, oldName.getDatabaseName());
if (oldDatabase == null) {
throw new DatabaseNotFoundException(oldName);
}
if (databaseDao.getBySourceDatabaseName(catalogName, newDatabaseName) != null) {
throw new DatabaseAlreadyExistsException(newName);
}
oldDatabase.setName(newDatabaseName);
databaseDao.save(oldDatabase);
log.debug("End: Rename database {} with {}", oldName, newName);
}
}
| 1,671 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorPartitionService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.inject.persist.Transactional;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.model.BaseInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.s3.dao.PartitionDao;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.model.Partition;
import com.netflix.metacat.connector.s3.model.Table;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import java.io.StringReader;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* S3 Connector implementation for partitions.
*
* @author amajumdar
*/
@Transactional
@Slf4j
public class S3ConnectorPartitionService implements ConnectorPartitionService {
private static final String FIELD_DATE_CREATED = "dateCreated";
private static final String FIELD_BATCHID = "batchid";
private final TableDao tableDao;
private final PartitionDao partitionDao;
private final S3ConnectorInfoConverter infoConverter;
private final String catalogName;
/**
* Constructor.
*
* @param catalogName catalog name
* @param tableDao table DAO impl
* @param partitionDao partition DAO impl
* @param infoConverter Converter for the S3 resources
*/
@Inject
public S3ConnectorPartitionService(@Named("catalogName") final String catalogName, final TableDao tableDao,
final PartitionDao partitionDao, final S3ConnectorInfoConverter infoConverter) {
this.tableDao = tableDao;
this.partitionDao = partitionDao;
this.infoConverter = infoConverter;
this.catalogName = catalogName;
}
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final PartitionInfo partitionInfo) {
final QualifiedName name = partitionInfo.getName();
log.debug("Start: Create partition {}", name);
final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(),
name.getTableName());
// Table
final Table table = getTable(tableName);
final List<Partition> partitions = partitionDao
.getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
if (!partitions.isEmpty()) {
throw new PartitionAlreadyExistsException(tableName, name.getPartitionName());
}
partitionDao.save(infoConverter.toPartition(table, partitionInfo));
log.debug("End: Create partition {}", name);
}
private Table getTable(final QualifiedName tableName) {
final Table result = tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(),
tableName.getTableName());
if (result == null) {
throw new TableNotFoundException(tableName);
}
return result;
}
@Override
public List<PartitionInfo> getPartitions(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName tableName,
@Nonnull final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
log.debug("Get partitions for table {}", tableName);
return _getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
partitionsRequest.getSort(), partitionsRequest.getPageable(), true);
}
@Override
public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final PartitionInfo partitionInfo) {
final QualifiedName name = partitionInfo.getName();
log.debug("Start: Update partition {}", name);
final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(),
name.getTableName());
// Table
final Table table = getTable(tableName);
final List<Partition> partitions = partitionDao
.getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
if (partitions.isEmpty()) {
throw new PartitionNotFoundException(tableName, name.getPartitionName());
}
partitionDao.save(infoConverter.fromPartitionInfo(partitionInfo));
log.debug("End: Update partition {}", name);
}
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Start: Delete partition {}", name);
partitionDao.deleteByNames(catalogName, name.getDatabaseName(), name.getTableName(),
Lists.newArrayList(name.getPartitionName()));
log.debug("End: Delete partition {}", name);
}
@Override
public PartitionsSaveResponse savePartitions(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName tableName,
@Nonnull final PartitionsSaveRequest partitionsSaveRequest) {
log.debug("Start: Save partitions for table {}", tableName);
// Table
final Table table = getTable(tableName);
// New partition ids
final List<String> addedPartitionIds = Lists.newArrayList();
// Updated partition ids
final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions keyed by partition name; populated only when checkIfExists is set.
Map<String, Partition> existingPartitionMap = Maps.newHashMap();
if (partitionsSaveRequest.getCheckIfExists()) {
final List<String> partitionNames = partitionsSaveRequest.getPartitions().stream().map(
partition -> {
final String partitionName = partition.getName().getPartitionName();
PartitionUtil.validatePartitionName(partitionName, infoConverter.partitionKeys(table));
return partitionName;
}).collect(Collectors.toList());
existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
}
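        // Partitions missing from the existing map are created; partitions that already exist
        // are re-saved only when the incoming uri differs from the stored uri.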
// New partitions
final List<Partition> s3Partitions = Lists.newArrayList();
for (PartitionInfo partition : partitionsSaveRequest.getPartitions()) {
final String partitionName = partition.getName().getPartitionName();
final Partition s3Partition = existingPartitionMap.get(partitionName);
if (s3Partition == null) {
addedPartitionIds.add(partitionName);
s3Partitions.add(infoConverter.toPartition(table, partition));
} else {
final String partitionUri = infoConverter.getUri(partition);
final String s3PartitionUri = s3Partition.getUri();
if (partitionUri != null && !partitionUri.equals(s3PartitionUri)) {
s3Partition.setUri(partitionUri);
existingPartitionIds.add(partitionName);
s3Partitions.add(s3Partition);
}
}
}
final List<String> partitionIdsForDeletes = partitionsSaveRequest.getPartitionIdsForDeletes();
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(),
partitionIdsForDeletes);
}
partitionDao.save(s3Partitions);
log.debug("End: Save partitions for table {}", tableName);
return PartitionsSaveResponse.builder().added(addedPartitionIds).updated(existingPartitionIds).build();
}
private Map<String, Partition> getPartitionsByNames(final Long tableId,
final List<String> partitionNames) {
final List<Partition> partitions = partitionDao.getPartitions(tableId, partitionNames, null, null, null, null);
return partitions.stream().collect(Collectors.toMap(Partition::getName, partition -> partition));
}
@Override
public PartitionInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(), name.getTableName());
final Table table = getTable(tableName);
final List<Partition> partitions = partitionDao
.getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
if (partitions.isEmpty()) {
throw new PartitionNotFoundException(tableName, name.getPartitionName());
}
log.debug("Get partition for table {}", tableName);
return infoConverter.toPartitionInfo(tableName, table, partitions.get(0));
}
@Override
public void deletePartitions(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName tableName,
@Nonnull final List<String> partitionNames,
final TableInfo tableInfo) {
log.debug("Start: Delete partitions {} for table {}", partitionNames, tableName);
partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(), partitionNames);
log.debug("End: Delete partitions {} for table {}", partitionNames, tableName);
}
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
boolean result = false;
final Table table = tableDao.getBySourceDatabaseTableName(catalogName, name.getDatabaseName(),
name.getTableName());
if (table != null) {
result = !partitionDao.getPartitions(table.getId(),
Lists.newArrayList(name.getPartitionName()), null, null, null, null).isEmpty();
}
return result;
}
@Override
public int getPartitionCount(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName table,
final TableInfo tableInfo
) {
return partitionDao.count(catalogName, table.getDatabaseName(), table.getTableName()).intValue();
}
@Override
public List<PartitionInfo> list(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
log.debug("Get partitions for table {} with name prefix {}", name, prefix);
List<PartitionInfo> result = _getPartitions(name, null, null, sort, pageable, true);
if (prefix != null) {
result = result.stream().filter(p -> p.getName().getPartitionName().startsWith(prefix.getPartitionName()))
.collect(Collectors.toList());
}
return result;
}
@Override
public Map<String, List<QualifiedName>> getPartitionNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
return partitionDao.getByUris(uris, prefixSearch).stream().collect(Collectors.groupingBy(Partition::getUri,
Collectors.mapping(p -> QualifiedName.ofPartition(
catalogName, p.getTable().getDatabase().getName(), p.getTable().getName(), p.getName()),
Collectors.toList())));
}
@Override
public List<String> getPartitionKeys(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName tableName,
@Nonnull final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
log.debug("Get partition keys for table {}", tableName);
return _getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
partitionsRequest.getSort(), partitionsRequest.getPageable(), true).stream()
.map(p -> p.getName().getPartitionName()).collect(Collectors.toList());
}
@Override
public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
log.debug("Get partition names for table {} with prefix {}", name, prefix);
Stream<QualifiedName> result = _getPartitions(name, null, null, sort, pageable, true)
.stream().map(BaseInfo::getName);
if (prefix != null) {
result = result
.filter(partitionName -> partitionName.getPartitionName().startsWith(prefix.getPartitionName()));
}
return result.collect(Collectors.toList());
}
@Override
public List<String> getPartitionUris(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName tableName,
@Nonnull final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
log.debug("Get partition uris for table {}", tableName);
return _getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
partitionsRequest.getSort(), partitionsRequest.getPageable(), true).stream()
.filter(p -> p.getSerde() != null && p.getSerde().getUri() != null)
.map(p -> p.getSerde().getUri()).collect(Collectors.toList());
}
@SuppressWarnings("checkstyle:methodname")
private List<PartitionInfo> _getPartitions(final QualifiedName tableName,
final String filterExpression,
final List<String> partitionIds,
final Sort sort,
final Pageable pageable,
final boolean includePartitionDetails) {
//
// Limiting the in clause to 5000 part names because the sql query with the IN clause for part_name(767 bytes)
    // will hit the max sql query length (max_allowed_packet for our RDS) if we use more than 5400 or so
//
final List<PartitionInfo> partitions = com.google.common.collect.Lists.newArrayList();
if (partitionIds != null && partitionIds.size() > 5000) {
final List<List<String>> subFilterPartitionNamesList = com.google.common.collect.Lists
.partition(partitionIds, 5000);
subFilterPartitionNamesList.forEach(
subPartitionIds -> partitions.addAll(_getConnectorPartitions(tableName, filterExpression,
subPartitionIds, sort, pageable, includePartitionDetails)));
} else {
partitions.addAll(_getConnectorPartitions(tableName, filterExpression, partitionIds, sort, pageable,
includePartitionDetails));
}
return partitions;
}
@SuppressWarnings("checkstyle:methodname")
private List<PartitionInfo> _getConnectorPartitions(final QualifiedName tableName,
final String filterExpression,
final List<String> partitionIds,
final Sort sort,
final Pageable pageable,
final boolean includePartitionDetails) {
// batch exists
final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
// Support for dateCreated
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
String dateCreatedSqlCriteria = null;
if (hasDateCreated) {
dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
}
// Table
final Table table = getTable(tableName);
final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
final List<Partition> partitions = partitionDao
.getPartitions(table.getId(), partitionIds, singlePartitionExprs, dateCreatedSqlCriteria, sort,
Strings.isNullOrEmpty(filterExpression) ? pageable : null);
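        // Evaluate the filter expression in memory against each partition's name, uri and,
        // when the expression references dateCreated, its creation time in epoch seconds.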
final FilterPartition filter = new FilterPartition();
List<PartitionInfo> result = partitions.stream().filter(partition -> {
Map<String, String> values = null;
if (hasDateCreated) {
values = Maps.newHashMap();
values.put(FIELD_DATE_CREATED, (partition.getCreatedDate().getTime() / 1000) + "");
}
return Strings.isNullOrEmpty(filterExpression)
|| filter
.evaluatePartitionExpression(filterExpression, partition.getName(), partition.getUri(), isBatched,
values);
}).map(partition -> infoConverter.toPartitionInfo(tableName, table, partition)).collect(Collectors.toList());
        // Apply pagination in memory when a filter expression was evaluated here (the
        // database-level pageable was skipped above in that case).
if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
int limit = pageable.getOffset() + pageable.getLimit();
if (result.size() < limit) {
limit = result.size();
}
if (pageable.getOffset() > limit) {
result = Lists.newArrayList();
} else {
result = result.subList(pageable.getOffset(), limit);
}
}
return result;
}
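    // Translates the dateCreated predicates of a partition filter expression into a SQL criteria
    // string against p.date_created (illustrative only: a predicate such as
    // "dateCreated > 1483228800" becomes "to_seconds(p.date_created) > 1483228800").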
private String getDateCreatedSqlCriteria(final String filterExpression) {
final StringBuilder result = new StringBuilder();
Collection<String> values = com.google.common.collect.Lists.newArrayList();
if (!Strings.isNullOrEmpty(filterExpression)) {
try {
values = (Collection<String>) new PartitionParser(new StringReader(filterExpression)).filter()
.jjtAccept(new PartitionParamParserEval(), null);
            } catch (Throwable ignored) {
                // Ignore filter parse failures; no dateCreated criteria will be applied.
            }
}
for (String value : values) {
if (result.length() != 0) {
result.append(" and ");
}
result.append(value.replace("dateCreated", "to_seconds(p.date_created)"));
}
return result.toString();
}
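    // Extracts individual partition key predicates (e.g. "dateint=20170101") from the filter
    // expression, dropping batchid and dateCreated predicates since those are handled separately.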
private Collection<String> getSinglePartitionExprs(final String filterExpression) {
Collection<String> result = com.google.common.collect.Lists.newArrayList();
if (!Strings.isNullOrEmpty(filterExpression)) {
try {
result = (Collection<String>) new PartitionParser(new StringReader(filterExpression)).filter()
.jjtAccept(new PartitionKeyParserEval(), null);
            } catch (Throwable ignored) {
                // Ignore filter parse failures; no partition key predicates will be extracted.
            }
}
if (result != null) {
result = result.stream().filter(s -> !(s.startsWith("batchid=") || s.startsWith("dateCreated="))).collect(
Collectors.toList());
}
return result;
}
}
| 1,672 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorPlugin.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.type.TypeRegistry;
import com.netflix.metacat.connector.pig.converters.PigTypeConverter;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* S3 plugin.
*/
public class S3ConnectorPlugin implements ConnectorPlugin {
/**
* Type of the connector.
*/
public static final String CONNECTOR_TYPE = "s3";
private static final PigTypeConverter PIG_TYPE_CONVERTER = new PigTypeConverter();
private static final ConnectorInfoConverter INFO_CONVERTER_S3 =
new S3ConnectorInfoConverter(PIG_TYPE_CONVERTER, true, TypeRegistry.getTypeRegistry());
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new S3ConnectorFactory(connectorContext.getCatalogName(), connectorContext.getCatalogShardName(),
connectorContext.getConfiguration(), (S3ConnectorInfoConverter) getInfoConverter());
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return PIG_TYPE_CONVERTER;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorInfoConverter getInfoConverter() {
return INFO_CONVERTER_S3;
}
}
| 1,673 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 connector classes.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3;
| 1,674 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/TableDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.model.Table;
import java.util.List;
import java.util.Map;
/**
* Table DAO.
*/
public interface TableDao extends BaseDao<Table> {
/**
* Get a table.
* @param sourceName source name
* @param databaseName database name
* @param tableName table name
* @return table
*/
Table getBySourceDatabaseTableName(String sourceName, String databaseName, String tableName);
/**
* Get list of tables.
* @param sourceName source name
* @param databaseName database name
* @param tableNames table names
* @return tables
*/
List<Table> getBySourceDatabaseTableNames(String sourceName, String databaseName, List<String> tableNames);
/**
     * Get list of tables for the given source name, database name and table name prefix.
* @param sourceName source name
* @param databaseName database name
* @param tableNamePrefix table name prefix
* @param sort sort
* @param pageable pageable
* @return list of tables
*/
List<Table> searchBySourceDatabaseTableName(String sourceName, String databaseName, String tableNamePrefix,
Sort sort, Pageable pageable);
/**
* Gets the names of the tables for the given uris.
* @param sourceName source name
* @param uris list of uri paths
* @param prefixSearch if true, will do a prefix search
* @return Map of uri to list of table names
*/
Map<String, List<QualifiedName>> getByUris(String sourceName, List<String> uris, boolean prefixSearch);
}
| 1,675 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/PartitionDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.model.Partition;
import java.util.List;
/**
* Partition DAO.
*/
public interface PartitionDao extends BaseDao<Partition> {
/**
* Get the list of partitions.
* @param tableId table id
* @param partitionIds partition names
* @param partitionParts parts
* @param dateCreatedSqlCriteria criteria
* @param sort sort
* @param pageable pageable
* @return list of partitions
*/
List<Partition> getPartitions(Long tableId, List<String> partitionIds, Iterable<String> partitionParts,
String dateCreatedSqlCriteria, Sort sort, Pageable pageable);
/**
* Deletes the partitions for the given table and list of partition ids.
* @param sourceName catalog/source name
* @param databaseName schema/database name
* @param tableName table name
* @param partitionIds list of partition ids
*/
void deleteByNames(String sourceName, String databaseName, String tableName, List<String> partitionIds);
/**
* Returns the number of partitions for the given table.
* @param sourceName catalog/source name
* @param databaseName schema/database name
* @param tableName table name
* @return number of partitions
*/
Long count(String sourceName, String databaseName, String tableName);
/**
* Returns the list of partitions with the given uri.
* @param uris uri paths
* @param prefixSearch true, if the given uri is partial
* @return list of partitions
*/
List<Partition> getByUris(List<String> uris, boolean prefixSearch);
}
| 1,676 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/BaseDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import java.util.List;
/**
* The base dao.
* @param <T> model entity type.
*/
public interface BaseDao<T> {
/**
* Save the entity to the data store.
* @param entity the entity to save.
* @return entity itself after being saved
*/
T save(T entity);
/**
* Save the entity and refresh the entity from
* the database if required.
*
* @param entity the entity to be saved and refreshed.
*
* @param isRefreshRequired {@code true} to perform a refresh from the store.
* @return entity itself
*/
T save(T entity, boolean isRefreshRequired);
/**
* Saves all given entities.
*
* @param entities list of entities to save
* @return the saved entities
     * @throws IllegalArgumentException in case any of the given entities is {@literal null}.
*/
List<T> save(Iterable<T> entities);
/**
* Delete the entity by using the id.
* @param id the id of the entity.
*/
void deleteById(Long id);
/**
* Delete the entities for the given ids.
* @param ids list of ids.
*/
void deleteById(Iterable<Long> ids);
/**
* Delete the given entity.
* @param entity entity to delete
*/
void delete(T entity);
/**
* Delete the given entities.
* @param entities list of entities to delete
*/
void delete(Iterable<T> entities);
/**
* Deletes all entities managed by the repository.
*/
void deleteAll();
/**
* Returns whether an entity with the given id exists.
* @param id must not be {@literal null}.
* @return true if an entity with the given id exists, {@literal false} otherwise
* @throws IllegalArgumentException if {@code id} is {@literal null}
*/
boolean isExists(Long id);
/**
* Returns an entity for the given id.
* @param id id of the entity
* @return Returns an entity for the given id
*/
T get(Long id);
/**
* Returns an entity for the given name.
* @param name name of the entity
* @return Returns an entity for the given name
*/
T getByName(String name);
/**
* Returns a list of entities for the given names.
* @param names names of the entities
* @return Returns a list of entities for the given names
*/
List<T> getByNames(List<String> names);
/**
* Returns the list of entities for the given ids.
* @param ids list of ids
* @return Returns the list of entities for the given ids
*/
List<T> get(Iterable<Long> ids);
/**
* Returns all the instances.
* @return Returns all the instances
*/
List<T> getAll();
/**
* Returns the number of entities available.
*
* @return the number of entities
*/
long count();
}
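// A minimal usage sketch (hypothetical, assuming an injected DAO implementation for an entity
// with a generated id) illustrating the generic contract:
//   final Table table = tableDao.getByName("my_table");
//   if (table != null && tableDao.isExists(table.getId())) {
//       tableDao.delete(table);
//   }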
| 1,677 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/SourceDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.connector.s3.model.Source;
/**
* Source DAO.
*/
public interface SourceDao extends BaseDao<Source> {
/**
* Get source.
* @param name name
* @param fromCache if true, it will be fetched from cache.
* @return source
*/
Source getByName(String name, boolean fromCache);
}
| 1,678 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/DatabaseDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.model.Database;
import java.util.List;
/**
* Database DAO.
*/
public interface DatabaseDao extends BaseDao<Database> {
/**
* Get database for the given source and database name.
* @param sourceName source name
* @param databaseName database name
* @return Database
*/
Database getBySourceDatabaseName(String sourceName, String databaseName);
/**
* Get list of databases for the given source name and database names.
* @param sourceName source name
* @param databaseNames list of database names
* @return list of databases
*/
List<Database> getBySourceDatabaseNames(String sourceName, List<String> databaseNames);
/**
* Get list of databases for the given source name and database name prefix.
* @param sourceName source name
* @param databaseNamePrefix database name prefix
* @param sort sort
* @param pageable pageable
* @return list of databases
*/
List<Database> searchBySourceDatabaseName(String sourceName, String databaseNamePrefix,
Sort sort, Pageable pageable);
}
| 1,679 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/FieldDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.connector.s3.model.Field;
/**
* Field DAO.
*/
public interface FieldDao extends BaseDao<Field> {
}
| 1,680 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 dao interfaces.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3.dao;
| 1,681 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/PartitionDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.dao.PartitionDao;
import com.netflix.metacat.connector.s3.model.Partition;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.Query;
import javax.persistence.TypedQuery;
import java.util.List;
/**
* Partition DAO impl.
*/
public class PartitionDaoImpl extends IdEntityDaoImpl<Partition> implements PartitionDao {
private static final String SQL_GET_PARTITIONS = "select * from partition_table as p where p.table_id=:tableId";
/**
* Constructor.
* @param em entity manager
*/
@Inject
public PartitionDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Partition> getEntityClass() {
return Partition.class;
}
/**
* Gets the partitions.
* @param tableId table id
* @param partitionIds partition names
* @param partitionParts parts
* @param dateCreatedSqlCriteria criteria
* @param sort sort
* @param pageable pageable
* @return list of partitions
*/
public List<Partition> getPartitions(final Long tableId, final List<String> partitionIds,
final Iterable<String> partitionParts, final String dateCreatedSqlCriteria,
final Sort sort, final Pageable pageable) {
// Create the sql
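        // For illustration (hypothetical values): a call with partition names, a partition part,
        // a dateCreated criteria, a sort and a pageable assembles a native query roughly like:
        //   select * from partition_table as p where p.table_id=:tableId
        //     and p.name in ('dateint=20170101') and p.name like '%dateint=2017%'
        //     and to_seconds(p.date_created) > 1483228800 order by name ASC limit 0,10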
final StringBuilder queryBuilder = new StringBuilder(SQL_GET_PARTITIONS);
if (partitionIds != null && !partitionIds.isEmpty()) {
queryBuilder.append(" and p.name in ('")
.append(Joiner.on("','").skipNulls().join(partitionIds))
.append("')");
}
if (partitionParts != null) {
for (String singlePartitionExpr : partitionParts) {
queryBuilder.append(" and p.name like '%").append(singlePartitionExpr).append("%'");
}
}
if (!Strings.isNullOrEmpty(dateCreatedSqlCriteria)) {
queryBuilder.append(" and ").append(dateCreatedSqlCriteria);
}
if (sort != null && sort.hasSort()) {
queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
}
if (pageable != null && pageable.isPageable()) {
queryBuilder.append(" limit ").append(pageable.getOffset()).append(',').append(pageable.getLimit());
}
// entityManager
final EntityManager entityManager = em.get();
final Query pQuery = entityManager.createNativeQuery(queryBuilder.toString(), Partition.class);
pQuery.setParameter("tableId", tableId);
return pQuery.getResultList();
}
@Override
public void deleteByNames(final String sourceName, final String databaseName, final String tableName,
final List<String> partitionNames) {
final Query query = em.get().createNamedQuery(Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseName", databaseName);
query.setParameter("tableName", tableName);
query.setParameter("partitionNames", partitionNames);
query.executeUpdate();
}
@Override
public Long count(final String sourceName, final String databaseName, final String tableName) {
final TypedQuery<Long> query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_COUNT_FOR_TABLE,
Long.class);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseName", databaseName);
query.setParameter("tableName", tableName);
return query.getSingleResult();
}
@Override
public List<Partition> getByUris(final List<String> uris, final boolean prefixSearch) {
TypedQuery<Partition> query = null;
if (prefixSearch) {
final StringBuilder builder = new StringBuilder("select p from Partition p where 1=2");
uris.forEach(uri -> builder.append(" or uri like '").append(uri).append("%'"));
query = em.get().createQuery(builder.toString(), Partition.class);
} else {
query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_BY_URI, Partition.class);
query.setParameter("uris", uris);
}
return query.getResultList();
}
}
| 1,682 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/TableDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.model.Table;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.Query;
import javax.persistence.TypedQuery;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Table DAO impl.
*/
public class TableDaoImpl extends IdEntityDaoImpl<Table> implements TableDao {
private static final String SQL_SEARCH_TABLES =
"select t from Table t where t.database.source.name=:sourceName and t.database.name=:databaseName"
+ " and (1=:isTableNameNull or t.name like :tableName)";
private static final String SQL_GET_TABLE_NAMES_BY_URIS =
"select d.name dname,t.name,uri from source s join database_object d on s.id=d.source_id join table_object t"
+ " on d.id=t.database_id join location l on t.id=l.table_id where s.name=:sourceName";
/**
* Constructor.
* @param em entity manager
*/
@Inject
public TableDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Table> getEntityClass() {
return Table.class;
}
@Override
public Table getBySourceDatabaseTableName(final String sourceName, final String databaseName,
final String tableName) {
Table result = null;
final List<Table> tables = getBySourceDatabaseTableNames(sourceName, databaseName,
Lists.newArrayList(tableName));
if (!tables.isEmpty()) {
result = tables.get(0);
}
return result;
}
@Override
public List<Table> getBySourceDatabaseTableNames(final String sourceName, final String databaseName,
final List<String> tableNames) {
final TypedQuery<Table> query = em.get().createNamedQuery(Table.NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES,
Table.class);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseName", databaseName);
query.setParameter("tableNames", tableNames);
return query.getResultList();
}
@Override
public List<Table> searchBySourceDatabaseTableName(final String sourceName, final String databaseName,
final String tableNamePrefix, final Sort sort, final Pageable pageable) {
final StringBuilder queryBuilder = new StringBuilder(SQL_SEARCH_TABLES);
if (sort != null && sort.hasSort()) {
queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
}
final TypedQuery<Table> query = em.get().createQuery(queryBuilder.toString(), Table.class);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseName", databaseName);
query.setParameter("isTableNameNull", tableNamePrefix == null ? 1 : 0);
query.setParameter("tableName", tableNamePrefix + "%");
if (pageable != null && pageable.isPageable()) {
query.setFirstResult(pageable.getOffset());
query.setMaxResults(pageable.getLimit());
}
return query.getResultList();
}
@Override
public Map<String, List<QualifiedName>> getByUris(final String sourceName, final List<String> uris,
final boolean prefixSearch) {
final StringBuilder builder = new StringBuilder(SQL_GET_TABLE_NAMES_BY_URIS);
if (prefixSearch) {
builder.append(" and ( 1=0");
uris.forEach(uri -> builder.append(" or uri like '").append(uri).append("%'"));
builder.append(")");
} else {
builder.append(" and uri in (:uris)");
}
final Query query = em.get().createNativeQuery(builder.toString());
query.setParameter("sourceName", sourceName);
if (!prefixSearch) {
query.setParameter("uris", uris);
}
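        // Each result row holds {database name, table name, uri} per the select list in
        // SQL_GET_TABLE_NAMES_BY_URIS; rows are grouped by uri below.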
final List<Object[]> result = query.getResultList();
return result.stream().collect(Collectors.groupingBy(o -> (String) o[2], Collectors
.mapping(o -> QualifiedName.ofTable(sourceName, (String) o[0], (String) o[1]), Collectors.toList())));
}
}
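/**
 * Illustrative usage sketch only, not part of the original source. Assuming the injected
 * EntityManager provider is bound, a caller could group table names by storage URI as below;
 * the catalog name, database name and URI values are hypothetical.
 */
final class TableDaoUsageSketch {
    private TableDaoUsageSketch() { }

    static void printTablesByUri(final TableDao tableDao) {
        // Exact-match lookup (prefixSearch=false): each returned key is one of the supplied URIs.
        final Map<String, List<QualifiedName>> byUri =
            tableDao.getByUris("s3", Lists.newArrayList("s3://example-bucket/warehouse/foo"), false);
        byUri.forEach((uri, names) -> System.out.println(uri + " -> " + names));
    }
}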
| 1,683 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/IdEntityDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.connector.s3.dao.BaseDao;
import com.netflix.metacat.connector.s3.model.IdEntity;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.criteria.CriteriaBuilder;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Root;
import java.util.List;
/**
* Id Entity DAO.
* @param <T> model entity type.
*/
public abstract class IdEntityDaoImpl<T extends IdEntity> extends BaseDaoImpl<T> implements
BaseDao<T> {
/**
* Constructor.
* @param em entity manager
*/
protected IdEntityDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
public List<T> get(final Iterable<Long> ids) {
final EntityManager entityManager = em.get();
final CriteriaBuilder cb = entityManager.getCriteriaBuilder();
final CriteriaQuery<T> criteriaQuery = cb.createQuery(getEntityClass());
final Root<T> root = criteriaQuery.from(getEntityClass());
criteriaQuery.where(root.get("id").in(Lists.newArrayList(ids)));
return entityManager.createQuery(criteriaQuery).getResultList();
}
@Override
protected boolean isNew(final T entity) {
return entity.getId() == null;
}
}
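/**
 * Illustrative sketch only, not part of the original source: the criteria-based get above fetches
 * all entities whose ids are in the supplied list in a single query. The DAO instance and id
 * values here are hypothetical.
 */
final class IdEntityDaoUsageSketch {
    private IdEntityDaoUsageSketch() { }

    static <T extends IdEntity> List<T> fetchPair(final BaseDao<T> dao) {
        // Any concrete subclass (DatabaseDaoImpl, TableDaoImpl, ...) inherits this batch lookup.
        return dao.get(Lists.newArrayList(1L, 2L));
    }
}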
| 1,684 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/BaseDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.metacat.connector.s3.dao.BaseDao;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.TypedQuery;
import java.util.List;
/**
* Base DAO implementation.
* @param <T> model entity type.
*/
public abstract class BaseDaoImpl<T> implements BaseDao<T> {
private static final String SQL_GET_BY_NAME = "select a from %s a where name=:name";
private static final String SQL_GET_BY_NAMES = "select a from %s a where name in (:names)";
protected Provider<EntityManager> em;
protected BaseDaoImpl(final Provider<EntityManager> em) {
this.em = em;
}
protected abstract Class<T> getEntityClass();
@Override
public T save(final T entity) {
return save(entity, false);
}
protected abstract boolean isNew(T entity);
@Override
public T save(final T entity, final boolean flush) {
T result = null;
final EntityManager entityManager = em.get();
if (isNew(entity)) {
entityManager.persist(entity);
result = entity;
} else {
result = entityManager.merge(entity);
}
if (flush) {
entityManager.flush();
}
return result;
}
@Override
public List<T> save(final Iterable<T> entities) {
final List<T> result = Lists.newArrayList();
if (entities != null) {
for (T entity : entities) {
result.add(save(entity));
}
}
return result;
}
@Override
public void deleteById(final Long id) {
Preconditions.checkArgument(id != null, "Id cannot be null.");
final T entity = get(id);
if (entity != null) {
delete(entity);
}
}
@Override
public void deleteById(final Iterable<Long> ids) {
Preconditions.checkArgument(ids != null, "Ids cannot be null.");
for (Long id : ids) {
deleteById(id);
}
}
@Override
public void delete(final T entity) {
Preconditions.checkArgument(entity != null, "Entity cannot be null.");
final EntityManager entityManager = em.get();
entityManager.remove(entity);
}
@Override
public void delete(final Iterable<T> entities) {
Preconditions.checkArgument(entities != null, "Entities cannot be null.");
for (T entity : entities) {
delete(entity);
}
}
@Override
public void deleteAll() {
em.get().createQuery("delete from " + getEntityClass().getName()).executeUpdate();
}
@Override
public boolean isExists(final Long id) {
return get(id) != null;
}
@Override
public T get(final Long id) {
Preconditions.checkArgument(id != null, "Id cannot be null.");
return em.get().find(getEntityClass(), id);
}
@Override
@SuppressFBWarnings
public T getByName(final String name) {
T result = null;
Preconditions.checkArgument(name != null, "Name cannot be null.");
final TypedQuery<T> query = em.get()
.createQuery(String.format(SQL_GET_BY_NAME, getEntityClass().getName()), getEntityClass());
query.setParameter("name", name);
try {
result = query.getSingleResult();
} catch (Exception ignored) { }
return result;
}
@Override
public List<T> getByNames(final List<String> names) {
List<T> result = Lists.newArrayList();
if (names != null && !names.isEmpty()) {
final TypedQuery<T> query = em.get()
.createQuery(String.format(SQL_GET_BY_NAMES, getEntityClass().getName()), getEntityClass());
query.setParameter("names", names);
result = query.getResultList();
}
return result;
}
@Override
public List<T> get(final Iterable<Long> ids) {
final List<T> result = Lists.newArrayList();
for (Long id : ids) {
result.add(get(id));
}
return result;
}
@Override
@SuppressWarnings("unchecked")
public List<T> getAll() {
return em.get().createQuery("select a from " + getEntityClass().getName() + " a").getResultList();
}
@Override
public long count() {
return (long) em.get().createQuery("select count(a) from " + getEntityClass().getName() + " a")
.getSingleResult();
}
}
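/**
 * Illustrative sketch only, not part of the original source: demonstrates the save-or-merge
 * behaviour above, where isNew decides between persist and merge and flush=true forces the
 * pending SQL to be issued immediately. The DAO instance passed in is hypothetical.
 */
final class BaseDaoUsageSketch {
    private BaseDaoUsageSketch() { }

    static <T> T saveAndFlush(final BaseDao<T> dao, final T entity) {
        // New entities are persisted, detached ones merged; the flush makes the row visible
        // to subsequent queries in the same transaction.
        return dao.save(entity, true);
    }
}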
| 1,685 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/DatabaseDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.model.Database;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.TypedQuery;
import java.util.List;
/**
* Database DAO implementation.
*/
public class DatabaseDaoImpl extends IdEntityDaoImpl<Database> implements DatabaseDao {
private static final String SQL_SEARCH_DATABASES =
"select d from Database d where d.source.name=:sourceName and (1=:isNameNull or d.name like :databaseName)";
/**
* Constructor.
* @param em entity manager
*/
@Inject
public DatabaseDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Database> getEntityClass() {
return Database.class;
}
@Override
public Database getBySourceDatabaseName(final String sourceName, final String databaseName) {
Database result = null;
final List<Database> databases = getBySourceDatabaseNames(sourceName, Lists.newArrayList(databaseName));
if (!databases.isEmpty()) {
result = databases.get(0);
}
return result;
}
@Override
public List<Database> getBySourceDatabaseNames(final String sourceName, final List<String> databaseNames) {
final TypedQuery<Database> query = em.get().createNamedQuery(Database.NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES,
Database.class);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseNames", databaseNames);
return query.getResultList();
}
@Override
public List<Database> searchBySourceDatabaseName(final String sourceName, final String databaseNamePrefix,
final Sort sort, final Pageable pageable) {
final StringBuilder queryBuilder = new StringBuilder(SQL_SEARCH_DATABASES);
if (sort != null && sort.hasSort()) {
queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
}
// entityManager
final EntityManager entityManager = em.get();
final TypedQuery<Database> pQuery = entityManager.createQuery(queryBuilder.toString(), Database.class);
pQuery.setParameter("sourceName", sourceName);
pQuery.setParameter("isNameNull", databaseNamePrefix == null ? 1 : 0);
pQuery.setParameter("databaseName", databaseNamePrefix + "%");
if (pageable != null && pageable.isPageable()) {
pQuery.setFirstResult(pageable.getOffset());
pQuery.setMaxResults(pageable.getLimit());
}
return pQuery.getResultList();
}
}
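/**
 * Illustrative sketch only, not part of the original source: a prefix search over databases in a
 * source. Passing null for sort and pageable returns the full, unordered result; the source name
 * and prefix used here are hypothetical.
 */
final class DatabaseDaoUsageSketch {
    private DatabaseDaoUsageSketch() { }

    static List<Database> listByPrefix(final DatabaseDao databaseDao) {
        // A null prefix binds isNameNull=1, which disables the LIKE filter entirely.
        return databaseDao.searchBySourceDatabaseName("s3", "prod_", null, null);
    }
}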
| 1,686 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/FieldDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.netflix.metacat.connector.s3.dao.FieldDao;
import com.netflix.metacat.connector.s3.model.Field;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
/**
* Field DAO impl.
*/
public class FieldDaoImpl extends IdEntityDaoImpl<Field> implements FieldDao {
/**
* Constructor.
* @param em entity manager
*/
@Inject
public FieldDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Field> getEntityClass() {
return Field.class;
}
}
| 1,687 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/SourceDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.netflix.metacat.common.server.connectors.exception.CatalogNotFoundException;
import com.netflix.metacat.connector.s3.dao.SourceDao;
import com.netflix.metacat.connector.s3.model.Source;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* Source DAO impl.
*/
public class SourceDaoImpl extends IdEntityDaoImpl<Source> implements SourceDao {
private LoadingCache<String, Source> sourceCache = CacheBuilder.newBuilder().expireAfterWrite(120, TimeUnit.MINUTES)
.build(
new CacheLoader<String, Source>() {
@Override
public Source load(final String name) throws Exception {
return loadSource(name);
}
});
/**
* Constructor.
* @param em entity manager
*/
@Inject
public SourceDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Source> getEntityClass() {
return Source.class;
}
private Source loadSource(final String name) {
return super.getByName(name);
}
@Override
public Source getByName(final String name) {
Source result = null;
try {
result = sourceCache.get(name);
} catch (ExecutionException ignored) {
//
}
if (result == null) {
throw new CatalogNotFoundException(name);
}
return result;
}
@Override
public Source getByName(final String name, final boolean fromCache) {
if (!fromCache) {
sourceCache.invalidate(name);
}
return getByName(name);
}
}
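/**
 * Illustrative sketch only, not part of the original source: the two-argument getByName lets a
 * caller bypass the 120-minute source cache. The catalog name is hypothetical.
 */
final class SourceDaoUsageSketch {
    private SourceDaoUsageSketch() { }

    static Source reload(final SourceDao sourceDao) {
        // fromCache=false invalidates the cached entry first, forcing a fresh load from the store;
        // a missing source still surfaces as a CatalogNotFoundException.
        return sourceDao.getByName("s3", false);
    }
}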
| 1,688 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 dao implementations.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3.dao.impl;
| 1,689 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Info.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CollectionTable;
import javax.persistence.Column;
import javax.persistence.ElementCollection;
import javax.persistence.Entity;
import javax.persistence.JoinColumn;
import javax.persistence.MapKeyColumn;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
import java.util.Map;
/**
* Info.
*/
@Entity
@javax.persistence.Table(name = "info",
uniqueConstraints = @UniqueConstraint(name = "info_u1", columnNames = "location_id"))
public class Info extends IdEntity {
private String inputFormat;
private String outputFormat;
private String serializationLib;
private String owner;
private Map<String, String> parameters;
private Location location;
@Column(name = "input_format")
public String getInputFormat() {
return inputFormat;
}
public void setInputFormat(final String inputFormat) {
this.inputFormat = inputFormat;
}
@Column(name = "output_format")
public String getOutputFormat() {
return outputFormat;
}
public void setOutputFormat(final String outputFormat) {
this.outputFormat = outputFormat;
}
@Column(name = "serialization_lib")
public String getSerializationLib() {
return serializationLib;
}
public void setSerializationLib(final String serializationLib) {
this.serializationLib = serializationLib;
}
@Column(name = "owner")
public String getOwner() {
return owner;
}
public void setOwner(final String owner) {
this.owner = owner;
}
@ElementCollection
@MapKeyColumn(name = "parameters_idx")
@Column(name = "parameters_elt")
@CollectionTable(name = "info_parameters")
public Map<String, String> getParameters() {
return parameters;
}
public void setParameters(final Map<String, String> parameters) {
this.parameters = parameters;
}
@OneToOne
@JoinColumn(name = "location_id", nullable = false)
public Location getLocation() {
return location;
}
public void setLocation(final Location location) {
this.location = location;
}
}
| 1,690 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Schema.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.OneToMany;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
import java.util.List;
/**
* Schema.
*/
@Entity
@javax.persistence.Table(name = "schema_object",
uniqueConstraints = @UniqueConstraint(name = "schema_object_u1", columnNames = "location_id"))
public class Schema extends IdEntity {
private Location location;
private List<Field> fields;
@OneToOne
@JoinColumn(name = "location_id", nullable = false)
public Location getLocation() {
return location;
}
public void setLocation(final Location location) {
this.location = location;
}
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "schema")
public List<Field> getFields() {
return fields;
}
public void setFields(final List<Field> fields) {
this.fields = fields;
}
}
| 1,691 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Table.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
/**
* Table.
*/
@Entity
@javax.persistence.Table(name = "table_object",
indexes = { @Index(name = "table_object_i1", columnList = "name") },
uniqueConstraints = @UniqueConstraint(name = "table_object_u1", columnNames = { "database_id", "name" }))
@NamedQueries({
@NamedQuery(
name = Table.NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES,
query = "select t from Table t where t.database.source.name=:sourceName and t.database.name=:databaseName"
+ " and t.name in (:tableNames)"
)
})
public class Table extends BaseTable {
/** Query name to get table for the given source, database and table names. */
public static final String NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES = "getBySourceDatabaseTableNames";
private Database database;
private Location location;
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@JoinColumn(name = "database_id", nullable = false)
public Database getDatabase() {
return database;
}
public void setDatabase(final Database database) {
this.database = database;
}
@OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "table")
public Location getLocation() {
return location;
}
public void setLocation(final Location location) {
this.location = location;
}
}
| 1,692 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Database.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToMany;
import javax.persistence.UniqueConstraint;
import java.util.List;
/**
* Database.
*/
@Entity
@javax.persistence.Table(name = "database_object",
indexes = @Index(name = "database_object_i1", columnList = "name"),
uniqueConstraints = @UniqueConstraint(name = "database_object_u1", columnNames = { "source_id", "name" }))
@NamedQueries({
@NamedQuery(
name = Database.NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES,
query = "select d from Database d where d.source.name=:sourceName and d.name in (:databaseNames)"
)
})
public class Database extends IdEntity {
    /** Query name to get databases for the given source and database names. */
public static final String NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES = "getBySourceDatabaseNames";
private String name;
private List<Table> tables;
private Source source;
@Column(name = "name", nullable = false)
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "database")
public List<Table> getTables() {
return tables;
}
public void setTables(final List<Table> tables) {
this.tables = tables;
}
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@JoinColumn(name = "source_id", nullable = false)
public Source getSource() {
return source;
}
public void setSource(final Source source) {
this.source = source;
}
}
| 1,693 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/BaseEntity.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import org.joda.time.Instant;
import javax.persistence.Column;
import javax.persistence.MappedSuperclass;
import javax.persistence.PrePersist;
import javax.persistence.PreUpdate;
import java.sql.Timestamp;
import java.util.Date;
/**
 * {@code BaseEntity} is the base entity that all other entities extend.
*/
@MappedSuperclass
public class BaseEntity {
/** The date of creation. */
protected Date createdDate;
/** The last updated date. */
protected Date lastUpdatedDate;
/**
* Get the date and time of the entity creation.
*
* @return
* The date and time of the creation
*/
@Column(name = "date_created", insertable = true, updatable = false, nullable = false)
public Date getCreatedDate() {
return createdDate;
}
/**
* Set the date and time of the creation.
*
* @param createdDate
* The date and time of the creation
*/
public void setCreatedDate(final Date createdDate) {
this.createdDate = createdDate;
}
public void setCreatedDate(final Timestamp createdDate) {
this.createdDate = createdDate;
}
/**
* Get the date and time of the last update.
*
* @return
* Get the date and time of the last update.
*/
@Column(name = "last_updated", insertable = true, updatable = true, nullable = false)
public Date getLastUpdatedDate() {
return lastUpdatedDate;
}
/**
* Set the date and time of the last update.
*
* @param lastUpdatedDate
* The date and time of the last update
*/
public void setLastUpdatedDate(final Date lastUpdatedDate) {
this.lastUpdatedDate = lastUpdatedDate;
}
public void setLastUpdatedDate(final Timestamp lastUpdatedDate) {
this.lastUpdatedDate = lastUpdatedDate;
}
/**
* Insert.
*/
@PrePersist
public void onInsert() {
if (createdDate == null) {
setCreatedDate(Instant.now().toDate());
}
if (lastUpdatedDate == null) {
setLastUpdatedDate(Instant.now().toDate());
}
}
@PreUpdate
void onUpdate() {
if (lastUpdatedDate == null) {
setLastUpdatedDate(Instant.now().toDate());
}
}
/**
* Validate the entity for valid values.
*/
public void validate() {
}
}
| 1,694 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/IdEntity.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.Column;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.MappedSuperclass;
import javax.persistence.Version;
/**
* IdEntity.
*/
@MappedSuperclass
public class IdEntity extends BaseEntity {
private Long id;
private Long version;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id", unique = true, nullable = false)
public Long getId() {
return id;
}
public void setId(final Long id) {
this.id = id;
}
@Version
@Column(name = "version", nullable = false)
public Long getVersion() {
return version;
}
public void setVersion(final Long version) {
this.version = version;
}
}
| 1,695 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/BaseTable.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.FetchType;
import javax.persistence.MappedSuperclass;
import javax.persistence.OneToMany;
import java.util.List;
/**
* Base Table.
*/
@MappedSuperclass
public abstract class BaseTable extends IdEntity {
private String name;
private List<Partition> partitions;
@Column(name = "name", nullable = false)
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "table")
public List<Partition> getPartitions() {
return partitions;
}
public void setPartitions(final List<Partition> partitions) {
this.partitions = partitions;
}
}
| 1,696 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Location.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
/**
* Location.
*/
@Entity
@javax.persistence.Table(name = "location",
uniqueConstraints = @UniqueConstraint(name = "location_u1", columnNames = "table_id"))
public class Location extends IdEntity {
/*
static belongsTo = [table: Table]
static hasOne = [schema: Schema, info: Info]
//TODO: Serde info
String uri
*/
private String uri;
private Table table;
private Schema schema;
private Info info;
@Column(name = "uri", nullable = true)
public String getUri() {
return uri;
}
public void setUri(final String uri) {
this.uri = uri;
}
@OneToOne
@JoinColumn(name = "table_id", nullable = false)
public Table getTable() {
return table;
}
public void setTable(final Table table) {
this.table = table;
}
@OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "location")
public Schema getSchema() {
return schema;
}
public void setSchema(final Schema schema) {
this.schema = schema;
}
@OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "location")
public Info getInfo() {
return info;
}
public void setInfo(final Info info) {
this.info = info;
}
}
| 1,697 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Partition.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.UniqueConstraint;
/**
* Partition.
*/
@Entity
@javax.persistence.Table(name = "partition_table",
indexes = { @Index(name = "partition_table_i1", columnList = "name"),
@Index(name = "partition_table_i2", columnList = "uri") },
uniqueConstraints = @UniqueConstraint(name = "partition_table_u1", columnNames = { "table_id", "name" }))
@NamedQueries({
@NamedQuery(
name = Partition.NAME_QUERY_GET_FOR_TABLE,
query = "select p from Partition p where p.table.name=:tableName and p.table.database.name=:databaseName"
+ " and p.table.database.source.name=:sourceName"
),
@NamedQuery(
name = Partition.NAME_QUERY_GET_COUNT_FOR_TABLE,
query = "select count(p) from Partition p where p.table.name=:tableName"
+ " and p.table.database.name=:databaseName and p.table.database.source.name=:sourceName"
),
@NamedQuery(
name = Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES,
query = "delete from Partition p where p.table.id = (select t.id from Table t where t.name=:tableName"
+ " and t.database.name=:databaseName and t.database.source.name=:sourceName)"
+ " and p.name in (:partitionNames)"
),
@NamedQuery(
name = Partition.NAME_QUERY_GET_BY_URI,
query = "select p from Partition p where p.uri in :uris"
),
@NamedQuery(
name = Partition.NAME_QUERY_GET_BY_URI_PREFIX,
query = "select p from Partition p where p.uri like :uri"
)
})
public class Partition extends IdEntity {
/** Query name to get partition for a given table. */
public static final String NAME_QUERY_GET_FOR_TABLE = "getForTable";
/** Query name to get partition count for a given table. */
public static final String NAME_QUERY_GET_COUNT_FOR_TABLE = "getCountForTable";
/** Query name to delete. */
public static final String NAME_QUERY_DELETE_BY_PARTITION_NAMES = "deleteByPartitionNames";
    /** Query name to get partition for a given uri. */
public static final String NAME_QUERY_GET_BY_URI = "getByUri";
/** Query name to get partition for a given uri prefix. */
public static final String NAME_QUERY_GET_BY_URI_PREFIX = "getByUriPrefix";
private String name;
private String uri;
private Table table;
@Column(name = "name", nullable = false)
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
@Column(name = "uri", nullable = false)
public String getUri() {
return uri;
}
public void setUri(final String uri) {
this.uri = uri;
}
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@JoinColumn(name = "table_id", nullable = false)
public Table getTable() {
return table;
}
public void setTable(final Table table) {
this.table = table;
}
}
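/**
 * Illustrative sketch only, not part of the original source: shows how the count named query
 * declared above could be executed through a plain JPA EntityManager; the parameter values are
 * hypothetical.
 */
final class PartitionQuerySketch {
    private PartitionQuerySketch() { }

    static long countPartitions(final javax.persistence.EntityManager em) {
        // NAME_QUERY_GET_COUNT_FOR_TABLE maps to "select count(p) from Partition p where ..." above.
        return em.createNamedQuery(Partition.NAME_QUERY_GET_COUNT_FOR_TABLE, Long.class)
            .setParameter("tableName", "example_table")
            .setParameter("databaseName", "example_db")
            .setParameter("sourceName", "s3")
            .getSingleResult();
    }
}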
| 1,698 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Source.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.OneToMany;
import javax.persistence.UniqueConstraint;
import java.util.List;
/**
* Source.
*/
@Entity
@javax.persistence.Table(name = "source",
uniqueConstraints = @UniqueConstraint(name = "source_u1", columnNames = "name"))
public class Source extends IdEntity {
private String name;
private String type;
private String thriftUri;
private boolean disabled;
private List<Database> databases;
@Column(name = "name", nullable = false)
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
@Column(name = "type", nullable = false)
public String getType() {
return type;
}
public void setType(final String type) {
this.type = type;
}
@Column(name = "thrift_uri")
public String getThriftUri() {
return thriftUri;
}
public void setThriftUri(final String thriftUri) {
this.thriftUri = thriftUri;
}
@Column(name = "disabled", nullable = false)
public boolean isDisabled() {
return disabled;
}
public void setDisabled(final boolean disabled) {
this.disabled = disabled;
}
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "source")
public List<Database> getDatabases() {
return databases;
}
public void setDatabases(final List<Database> databases) {
this.databases = databases;
}
}
| 1,699 |