index
int64 0
0
| repo_id
stringlengths 26
205
| file_path
stringlengths 51
246
| content
stringlengths 8
433k
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client
|
Create_ds/metacat/metacat-client/src/main/java/com/netflix/metacat/client/api/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* API package for Metacat.
*
* @author amajumdar
*/
package com.netflix.metacat.client.api;
| 9,600 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.util.Map;
/**
* MySql implementation of a connector factory.
*
* @author tgianos
* @since 1.0.0
*/
class MySqlConnectorFactory extends DefaultConnectorFactory {
    /**
     * Constructor.
     *
     * <p>Delegates all lifecycle work to {@link DefaultConnectorFactory}, supplying a single
     * Guice module ({@link MySqlConnectorModule}) that binds the MySQL-specific service
     * implementations for this catalog shard.
     *
     * @param name catalog name
     * @param catalogShardName catalog shard name
     * @param configuration catalog configuration
     */
    MySqlConnectorFactory(
        // @Nonnull documents the contract for static analysis; lombok's @NonNull adds the runtime null check.
        @Nonnull @NonNull final String name,
        @Nonnull @NonNull final String catalogShardName,
        @Nonnull @NonNull final Map<String, String> configuration
    ) {
        super(name, catalogShardName, Lists.newArrayList(new MySqlConnectorModule(catalogShardName, configuration)))
;
    }
}
| 9,601 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.SQLException;
/**
* Convert MySQL exceptions into generic connector exceptions for use higher up in the system.
*
* @author tgianos
* @author zhenl
* @see SQLException
* @see ConnectorException
* @see <a href="https://dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-error-sqlstates.html">MySQL Ref</a>
* @since 1.0.0
*/
public class MySqlExceptionMapper implements JdbcExceptionMapper {
    /**
     * {@inheritDoc}
     *
     * <p>Translates vendor-specific MySQL error codes into Metacat connector exceptions so
     * callers higher in the stack never need to know MySQL error numbering. Codes without a
     * specific mapping are wrapped in a generic {@link ConnectorException} with the original
     * {@link SQLException} attached as the cause.
     */
    @Override
    public ConnectorException toConnectorException(
        @NonNull @Nonnull final SQLException se,
        @Nonnull @NonNull final QualifiedName name
    ) {
        final int errorCode = se.getErrorCode();
        if (errorCode == 1007) {
            // database already exists
            return new DatabaseAlreadyExistsException(name, se);
        }
        if (errorCode == 1050) {
            // table already exists
            return new TableAlreadyExistsException(name, se);
        }
        if (errorCode == 1008) {
            // database does not exist
            return new DatabaseNotFoundException(name, se);
        }
        if (errorCode == 1146) {
            // table doesn't exist
            return new TableNotFoundException(name, se);
        }
        // Anything else: surface as a generic connector failure, preserving the cause.
        return new ConnectorException(se.getMessage(), se);
    }
}
| 9,602 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorModule.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorPartitionService;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.sql.DataSource;
import java.util.Map;
/**
* A Guice Module for the MySqlConnector.
*
* @author tgianos
* @since 1.0.0
*/
public class MySqlConnectorModule extends AbstractModule {
    // Identifies this catalog shard inside the shared DataSourceManager.
    private final String catalogShardName;
    // Raw catalog configuration; also consulted for service-class overrides.
    private final Map<String, String> configuration;

    /**
     * Constructor.
     *
     * @param catalogShardName catalog shard name
     * @param configuration connector configuration
     */
    MySqlConnectorModule(
        @Nonnull @NonNull final String catalogShardName,
        @Nonnull @NonNull final Map<String, String> configuration
    ) {
        this.catalogShardName = catalogShardName;
        this.configuration = configuration;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Binds the JDBC datasource plus the MySQL implementations of the type converter,
     * exception mapper, and connector services. The service bindings may be overridden via
     * configuration; the MySQL/JDBC classes passed to {@link ConnectorUtils} are only defaults.
     */
    @Override
    protected void configure() {
        // The shared DataSourceManager caches pools keyed by shard name; load (or reuse) ours.
        final DataSource dataSource = DataSourceManager.get()
            .load(this.catalogShardName, this.configuration)
            .get(this.catalogShardName);
        this.bind(DataSource.class).toInstance(dataSource);

        // Stateless helpers are safe to share as singletons.
        this.bind(JdbcTypeConverter.class).to(MySqlTypeConverter.class).in(Scopes.SINGLETON);
        this.bind(JdbcExceptionMapper.class).to(MySqlExceptionMapper.class).in(Scopes.SINGLETON);

        this.bind(ConnectorDatabaseService.class)
            .to(ConnectorUtils.getDatabaseServiceClass(this.configuration, MySqlConnectorDatabaseService.class))
            .in(Scopes.SINGLETON);
        this.bind(ConnectorTableService.class)
            .to(ConnectorUtils.getTableServiceClass(this.configuration, MySqlConnectorTableService.class))
            .in(Scopes.SINGLETON);
        this.bind(ConnectorPartitionService.class)
            .to(ConnectorUtils.getPartitionServiceClass(this.configuration, JdbcConnectorPartitionService.class))
            .in(Scopes.SINGLETON);
    }
}
| 9,603 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.Locale;
/**
* Type converter for MySQL.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class MySqlTypeConverter extends JdbcTypeConverter {
    /** Maximum column size in bytes MySQL allows in a single row (65,535). */
    static final int MAX_BYTE_LENGTH = 65_535;
    /** Smallest accepted length for sized types; lengths below this are rejected. */
    private static final int MIN_BYTE_LENGTH = 0;

    /**
     * {@inheritDoc}
     *
     * <p>Maps a raw MySQL column type declaration (e.g. {@code "varchar(255)"}) to the Metacat
     * canonical type system. Unrecognized types are returned as {@link BaseType#UNKNOWN}
     * instead of failing, so the rest of a table can still be described.
     */
    @Override
    public Type toMetacatType(@Nonnull @NonNull final String type) {
        // see: https://dev.mysql.com/doc/connector-j/6.0/en/connector-j-reference-type-conversions.html
        // FIX: lower-case with a fixed locale. The default-locale toLowerCase() would mangle
        // names like "INT" under locales with special casing rules (e.g. Turkish dotless i).
        final String lowerType = type.toLowerCase(Locale.ENGLISH);
        // Split up the possible type: TYPE[(size, magnitude)] EXTRA
        final String[] splitType = this.splitType(lowerType);
        switch (splitType[0]) {
            case "bit":
                return this.toMetacatBitType(splitType);
            case "tinyint":
                // TODO: MySQL generally treats this as boolean should we? Not according to spreadsheet currently
                return BaseType.TINYINT;
            case "bool":
            case "boolean":
                return BaseType.BOOLEAN;
            case "smallint":
                return BaseType.SMALLINT;
            case "mediumint":
            case "int":
            case "integer":
                return BaseType.INT;
            case "bigint":
                return BaseType.BIGINT;
            case "float": // TODO: MySQL precision is lost
                return BaseType.FLOAT;
            case "double":
            case "double precision":
                return BaseType.DOUBLE;
            case "decimal":
            case "dec":
                return this.toMetacatDecimalType(splitType);
            case "date":
                return BaseType.DATE;
            case "time":
                return this.toMetacatTimeType(splitType);
            case "datetime":
            case "timestamp":
                return this.toMetacatTimestampType(splitType);
            case "char":
                return this.toMetacatCharType(splitType);
            case "varchar":
                return this.toMetacatVarcharType(splitType);
            case "binary":
            case "tinyblob":
            case "blob":
            case "mediumblob":
            case "longblob":
            case "varbinary":
                return this.toMetacatVarbinaryType(splitType);
            case "tinytext":
            case "text":
            case "mediumtext":
            case "longtext":
                return BaseType.STRING;
            case "json":
                return BaseType.JSON;
            case "year":
            case "enum":
            case "set":
            default:
                log.info("Encountered {} type. Returning Unknown type.", splitType[0]);
                return BaseType.UNKNOWN;
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>Maps a Metacat canonical type to the MySQL column type string used in DDL.
     *
     * @throws IllegalArgumentException if the type is unknown, its concrete class does not match
     *     its signature base, or a sized type carries a negative length
     * @throws UnsupportedOperationException if MySQL has no equivalent type (arrays, maps, rows,
     *     intervals)
     */
    @Override
    public String fromMetacatType(@Nonnull @NonNull final Type type) {
        switch (type.getTypeSignature().getBase()) {
            case ARRAY:
                throw new UnsupportedOperationException("MySQL doesn't support array types");
            case BIGINT:
                return "BIGINT";
            case BOOLEAN:
                return "BOOLEAN";
            case CHAR:
                // Oversized CHAR falls back to TEXT (see sizedType for the 65,535-byte limit).
                return sizedType("CHAR", requireType(type, CharType.class, "char").getLength(), "TEXT");
            case DATE:
                return "DATE";
            case DECIMAL:
                final DecimalType decimalType = requireType(type, DecimalType.class, "decimal");
                return "DECIMAL(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
            case DOUBLE:
                return "DOUBLE";
            case FLOAT:
                return "FLOAT(24)";
            case INT:
                return "INT";
            case INTERVAL_DAY_TO_SECOND:
            case INTERVAL_YEAR_TO_MONTH:
                // Merged: both interval flavors are equally unsupported, with the same message.
                throw new UnsupportedOperationException("MySQL doesn't support interval types");
            case JSON:
                return "JSON";
            case MAP:
                throw new UnsupportedOperationException("MySQL doesn't support map types");
            case ROW:
                throw new UnsupportedOperationException("MySQL doesn't support row types");
            case SMALLINT:
                return "SMALLINT";
            case STRING:
                return "TEXT";
            case TIME:
            case TIME_WITH_TIME_ZONE:
                return "TIME";
            case TIMESTAMP:
            case TIMESTAMP_WITH_TIME_ZONE:
                return "TIMESTAMP";
            case TINYINT:
                return "TINYINT";
            case UNKNOWN:
                throw new IllegalArgumentException("Can't map an unknown type");
            case VARBINARY:
                // Oversized VARBINARY falls back to BLOB.
                return sizedType("VARBINARY", requireType(type, VarbinaryType.class, "varbinary").getLength(), "BLOB");
            case VARCHAR:
                // Oversized VARCHAR falls back to TEXT.
                return sizedType("VARCHAR", requireType(type, VarcharType.class, "varchar").getLength(), "TEXT");
            default:
                throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
        }
    }

    /**
     * Downcasts {@code type} to {@code expected}, failing with the conventional message when the
     * signature base and the concrete class disagree.
     */
    private static <T extends Type> T requireType(final Type type, final Class<T> expected, final String typeName) {
        if (!expected.isInstance(type)) {
            throw new IllegalArgumentException(
                "Expected " + typeName + " type but was " + type.getClass().getName());
        }
        return expected.cast(type);
    }

    /**
     * Renders a length-parameterized MySQL type, falling back to an unbounded type when the
     * length exceeds MySQL's 65,535-byte limit.
     *
     * <p>NOTE: technically a table could consist of one single column of exactly the max length,
     * but that can only be enforced by higher-level logic that sees the entire table.
     *
     * @throws IllegalArgumentException if {@code length} is negative
     */
    private static String sizedType(final String mysqlType, final int length, final String fallbackType) {
        if (length < MIN_BYTE_LENGTH) {
            // FIX: the old message claimed "length > 0" but the check (MIN_BYTE_LENGTH == 0)
            // only ever rejected negative lengths; the message now matches the behavior.
            throw new IllegalArgumentException(mysqlType + " type must have a non-negative length");
        }
        return length <= MAX_BYTE_LENGTH ? mysqlType + "(" + length + ")" : fallbackType;
    }
}
| 9,604 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorDatabaseService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorUtils;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
/**
* MySql specific extension of the JdbcConnectorDatabaseService implementation for any differences from default.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class MySqlConnectorDatabaseService extends JdbcConnectorDatabaseService {
    /**
     * Constructor.
     *
     * @param dataSource The datasource to use
     * @param exceptionMapper The exception mapper to use
     */
    @Inject
    public MySqlConnectorDatabaseService(
        @Nonnull @NonNull final DataSource dataSource,
        @Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
    ) {
        super(dataSource, exceptionMapper);
    }

    /**
     * {@inheritDoc}
     *
     * <p>MySQL reports databases as JDBC "catalogs" rather than "schemas", so this override
     * lists catalog names from the connection metadata instead of the default schema lookup.
     */
    @Override
    public List<QualifiedName> listNames(
        @Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        final String catalogName = name.getCatalogName();
        log.debug("Beginning to list database names for catalog {} for request {}", catalogName, context);
        try (
            final Connection connection = this.getDataSource().getConnection();
            final ResultSet catalogs = connection.getMetaData().getCatalogs()
        ) {
            final List<QualifiedName> databaseNames = Lists.newArrayList();
            while (catalogs.next()) {
                final String databaseName = catalogs.getString("TABLE_CAT").toLowerCase(Locale.ENGLISH);
                // MySQL's own bookkeeping databases are never exposed.
                if ("information_schema".equals(databaseName) || "mysql".equals(databaseName)) {
                    continue;
                }
                // No prefix means everything matches; otherwise require a non-blank prefix
                // database name that the candidate starts with.
                final boolean matchesPrefix = prefix == null
                    || (StringUtils.isNotBlank(prefix.getDatabaseName())
                    && databaseName.startsWith(prefix.getDatabaseName()));
                if (matchesPrefix) {
                    databaseNames.add(QualifiedName.ofDatabase(catalogName, databaseName));
                }
            }
            // Only the database name is available to sort on here, so the Sort's field is ignored.
            if (sort != null) {
                JdbcConnectorUtils.sort(databaseNames, sort, Comparator.comparing(QualifiedName::getDatabaseName));
            }
            final List<QualifiedName> page = JdbcConnectorUtils.paginate(databaseNames, pageable);
            log.debug("Finished listing database names for catalog {} for request {}", catalogName, context);
            return page;
        } catch (final SQLException se) {
            log.debug("An exception occurred listing database names for catalog {} for request {}",
                catalogName, context, se);
            throw this.getExceptionMapper().toConnectorException(se, name);
        }
    }
}
| 9,605 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorTableService.java
|
package com.netflix.metacat.connector.mysql;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorTableService;
import lombok.extern.slf4j.Slf4j;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
/**
* Mysql table service implementation.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
public class MySqlConnectorTableService extends JdbcConnectorTableService {
private static final String COL_CREATE_TIME = "create_time";
private static final String COL_UPDATE_TIME = "update_time";
private static final String SQL_GET_AUDIT_INFO
= "select create_time, update_time from information_schema.tables where table_schema=? and table_name=?";
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public MySqlConnectorTableService(
final DataSource dataSource,
final JdbcTypeConverter typeConverter,
final JdbcExceptionMapper exceptionMapper) {
super(dataSource, typeConverter, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
try (
final PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)
) {
statement.setString(1, tableName.getDatabaseName());
statement.setString(2, tableName.getTableName());
try (final ResultSet resultSet = statement.executeQuery()) {
if (resultSet.next()) {
final AuditInfo auditInfo =
AuditInfo.builder().createdDate(resultSet.getDate(COL_CREATE_TIME))
.lastModifiedDate(resultSet.getDate(COL_UPDATE_TIME)).build();
tableInfo.setAudit(auditInfo);
}
}
} catch (final Exception ignored) {
log.info("Ignoring. Error getting the audit info for table {}", tableName);
}
}
}
| 9,606 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/MySqlConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.mysql;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Implementation of the ConnectorPlugin interface for MySQL.
*
* @author tgianos
* @since 1.0.0
*/
public class MySqlConnectorPlugin implements ConnectorPlugin {
    // Identifier under which this connector registers itself.
    private static final String CONNECTOR_TYPE = "mysql";
    // The converter is stateless, so one shared instance suffices.
    private static final MySqlTypeConverter TYPE_CONVERTER = new MySqlTypeConverter();

    /**
     * {@inheritDoc}
     */
    @Override
    public String getType() {
        return CONNECTOR_TYPE;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Builds a new factory for the catalog described by the supplied context.
     */
    @Override
    public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
        final String catalogName = connectorContext.getCatalogName();
        final String catalogShardName = connectorContext.getCatalogShardName();
        return new MySqlConnectorFactory(catalogName, catalogShardName, connectorContext.getConfiguration());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTypeConverter getTypeConverter() {
        return TYPE_CONVERTER;
    }
}
| 9,607 |
0 |
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-mysql/src/main/java/com/netflix/metacat/connector/mysql/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* MySQL connector implementation classes to plugin a MySQL based data store.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.mysql;
| 9,608 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.SpringConnectorFactory;
import com.netflix.metacat.connector.druid.configs.DruidConnectorConfig;
import com.netflix.metacat.connector.druid.configs.DruidHttpClientConfig;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
/**
* Druid Connector Factory.
*
* @author zhenl
* @since 1.2.0
*/
public class DruidConnectorFactory extends SpringConnectorFactory {
    /**
     * Constructor.
     *
     * <p>Registers the druid Spring configuration classes with the parent factory's application
     * context and refreshes it, after which the druid service beans below are available.
     *
     * @param druidConnectorInfoConverter converter between druid models and Metacat connector models
     * @param connectorContext connector config
     */
    DruidConnectorFactory(
        final DruidConnectorInfoConverter druidConnectorInfoConverter,
        final ConnectorContext connectorContext
    ) {
        super(druidConnectorInfoConverter, connectorContext);
        // Order matters: classes must be registered before the context is refreshed.
        super.registerClazz(DruidConnectorConfig.class, DruidHttpClientConfig.class);
        super.refresh();
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorDatabaseService getDatabaseService() {
        // Bean instances are created by the config classes registered in the constructor.
        return this.ctx.getBean(DruidConnectorDatabaseService.class);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTableService getTableService() {
        return this.ctx.getBean(DruidConnectorTableService.class);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorPartitionService getPartitionService() {
        return this.ctx.getBean(DruidConnectorPartitionService.class);
    }
}
| 9,609 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorPartitionService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.druid.converter.DataSource;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
import com.netflix.metacat.connector.druid.converter.DruidConverterUtil;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
/**
* Druid implementation of the ConnectorPartitionService.
* The partition concept is used to model segment in druid.
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class DruidConnectorPartitionService implements ConnectorPartitionService {
    // Client for the druid coordinator's segment-metadata endpoints.
    private final MetacatDruidClient druidClient;
    // Converts druid segment models into Metacat partition models.
    private final DruidConnectorInfoConverter druidConnectorInfoConverter;

    /**
     * Constructor.
     *
     * @param druidClient druid client
     * @param druidConnectorInfoConverter druid info converter
     */
    public DruidConnectorPartitionService(
        final MetacatDruidClient druidClient,
        final DruidConnectorInfoConverter druidConnectorInfoConverter
    ) {
        this.druidClient = druidClient;
        this.druidConnectorInfoConverter = druidConnectorInfoConverter;
    }

    /**
     * {@inheritDoc}
     *
     * <p>A druid segment is modeled as a Metacat partition, so the partition count is the
     * number of segments reported for the data source.
     */
    @Override
    public int getPartitionCount(
        final ConnectorRequestContext context,
        final QualifiedName name,
        final TableInfo tableInfo
    ) {
        final ObjectNode allSegmentData = this.druidClient.getAllDataByName(name.getTableName());
        return DruidConverterUtil.getSegmentCount(allSegmentData);
    }

    /**
     * {@inheritDoc}
     *
     * <p>Fetches all segment metadata for the data source and converts every segment into a
     * {@link PartitionInfo}. The partition request is not currently used for filtering.
     */
    @Override
    public List<PartitionInfo> getPartitions(
        final ConnectorRequestContext context,
        final QualifiedName name,
        final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo) {
        final ObjectNode allSegmentData = this.druidClient.getAllDataByName(name.getTableName());
        final DataSource dataSource = DruidConverterUtil.getDatasourceFromAllSegmentJsonObject(allSegmentData);
        final List<PartitionInfo> result = new ArrayList<>();
        dataSource.getSegmentList().forEach(
            segment -> result.add(this.druidConnectorInfoConverter.getPartitionInfoFromSegment(segment)));
        return result;
    }
}
| 9,610 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConfigConstants.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
/**
* Druid Config Constants.
*
* @author zhenl
* @since 1.2.0
*/
public final class DruidConfigConstants {
    /**
     * DRUID_COORDINATOR_URI.
     */
    public static final String DRUID_COORDINATOR_URI = "druid.uri";
    //Http client
    /**
     * HTTP_TIMEOUT.
     */
    public static final String HTTP_TIMEOUT = "http.timeout";
    /**
     * POOL_SIZE.
     */
    public static final String POOL_SIZE = "pool.size";
    /**
     * DRUID_DB.
     */
    public static final String DRUID_DB = "default";
    /**
     * druid name.
     */
    public static final String NAME = "name";
    /**
     * druid properties.
     */
    public static final String PROPERTIES = "properties";
    /**
     * druid created.
     */
    public static final String CREATED = "created";
    /**
     * druid dimensions.
     */
    public static final String DIMENSIONS = "dimensions";
    /**
     * druid metrics.
     */
    public static final String METRICS = "metrics";
    /**
     * druid segments.
     */
    public static final String SEGMENTS = "segments";
    /**
     * size.
     */
    public static final String SIZE = "size";
    //Segment related information
    /**
     * dataSource.
     */
    public static final String DATA_SOURCE = "dataSource";
    /**
     * interval.
     */
    public static final String INTERVAL = "interval";
    /**
     * loadSpec.
     */
    public static final String LOADSPEC = "loadSpec";
    /**
     * bucket.
     */
    public static final String LOADSPEC_BUCKET = "bucket";
    /**
     * key.
     */
    public static final String LOADSPEC_KEY = "key";
    /**
     * loadspec type.
     */
    public static final String LOADSPEC_TYPE = "type";
    /**
     * identifier.
     */
    public static final String IDENTIFIER = "identifier";
    /**
     * default value if empty.
     */
    public static final String DEFAULT_VALUE = "NONE";
    /**
     * default value if empty.
     *
     * @deprecated misspelled constant name kept for backward compatibility;
     *     use {@link #DEFAULT_VALUE} instead.
     */
    @Deprecated
    public static final String DEFAULT_VAULE = DEFAULT_VALUE;

    // Utility class: never instantiated.
    private DruidConfigConstants() {
    }
}
| 9,611 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorPlugin.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
/**
 * Connector plugin that registers the "druid" connector type with Metacat and
 * creates its factory.
 *
 * @author zhenl
 * @since 1.2.0
 */
public class DruidConnectorPlugin implements ConnectorPlugin {
    /** Type identifier under which this connector is registered. */
    private static final String CONNECTOR_TYPE = "druid";

    /**
     * {@inheritDoc}
     */
    @Override
    public String getType() {
        return CONNECTOR_TYPE;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorFactory create(final ConnectorContext connectorContext) {
        final DruidConnectorInfoConverter infoConverter =
            new DruidConnectorInfoConverter(connectorContext.getCatalogName());
        return new DruidConnectorFactory(infoConverter, connectorContext);
    }

    /**
     * {@inheritDoc}
     *
     * <p>The druid connector supplies no type converter; this always returns null.
     */
    @Override
    public ConnectorTypeConverter getTypeConverter() {
        return null;
    }
}
| 9,612 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorDatabaseService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import javax.annotation.Nullable;
import java.util.List;
/**
 * Druid Connector DatabaseService.
 *
 * <p>Druid has no database concept, so this service always exposes exactly one
 * synthetic database named {@link DruidConfigConstants#DRUID_DB}.
 *
 * @author zhenl
 * @since 1.2.0
 */
public class DruidConnectorDatabaseService implements ConnectorDatabaseService {
    /**
     * {@inheritDoc}.
     */
    @Override
    public List<QualifiedName> listNames(
        final ConnectorRequestContext requestContext,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable) {
        // Ignore prefix/sort/pageable: there is only ever one database.
        final QualifiedName defaultDb =
            QualifiedName.ofDatabase(name.getCatalogName(), DruidConfigConstants.DRUID_DB);
        return Lists.newArrayList(defaultDb);
    }

    /**
     * {@inheritDoc}.
     */
    @Override
    public DatabaseInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
        final QualifiedName defaultDb =
            QualifiedName.ofDatabase(name.getCatalogName(), DruidConfigConstants.DRUID_DB);
        return DatabaseInfo.builder().name(defaultDb).build();
    }
}
| 9,613 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/MetacatDruidClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.List;
/**
 * Druid Client.
 *
 * <p>Thin abstraction over the druid coordinator's metadata endpoints. Every
 * method has a default implementation that throws
 * {@link UnsupportedOperationException}, so implementations only need to
 * override the operations they actually support.
 *
 * @author zhenl
 * @since 1.2.0
 */
public interface MetacatDruidClient {
    /**
     * Standard error message for all default implementations.
     */
    String UNSUPPORTED_MESSAGE = "Not supported for metacat druid client";

    /**
     * Get all data sources.
     *
     * @return data source names
     * @throws UnsupportedOperationException if the implementation does not support this operation
     */
    default List<String> getAllDataSources() {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns all segment data of data source.
     *
     * @param dataSourceName dataSourceName
     * @return data source raw data
     * @throws UnsupportedOperationException if the implementation does not support this operation
     */
    default ObjectNode getAllDataByName(final String dataSourceName) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns the latest segment of data source.
     *
     * @param dataSourceName dataSourceName
     * @return data source raw data
     * @throws UnsupportedOperationException if the implementation does not support this operation
     */
    default ObjectNode getLatestDataByName(final String dataSourceName) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
}
| 9,614 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,615 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/DruidConnectorTableService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetadataException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.druid.converter.DataSource;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
import com.netflix.metacat.connector.druid.converter.DruidConverterUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.web.client.HttpClientErrorException;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Comparator;
import java.util.List;
/**
* Druid Connector Table Service, which manages druid data source.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class DruidConnectorTableService implements ConnectorTableService {
private final MetacatDruidClient druidClient;
private final DruidConnectorInfoConverter druidConnectorInfoConverter;
/**
* Constructor.
*
* @param druidClient druid client
* @param druidConnectorInfoConverter druid infor object converter
*/
public DruidConnectorTableService(
final MetacatDruidClient druidClient,
final DruidConnectorInfoConverter druidConnectorInfoConverter
) {
this.druidClient = druidClient;
this.druidConnectorInfoConverter = druidConnectorInfoConverter;
}
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
log.debug("Get table metadata for qualified name {} for request {}", name, context);
try {
final ObjectNode node = this.druidClient.getLatestDataByName(name.getTableName());
final DataSource dataSource = DruidConverterUtil.getDatasourceFromLatestSegmentJsonObject(node);
return this.druidConnectorInfoConverter.getTableInfoFromDatasource(dataSource);
} catch (MetacatException e) {
log.error(String.format("Table %s not found.", name), e);
throw new TableNotFoundException(name);
} catch (HttpClientErrorException e) {
log.error(String.format("Failed getting table %s.", name), e);
if (HttpStatus.NOT_FOUND.equals(e.getStatusCode())) {
throw new TableNotFoundException(name);
} else {
throw new InvalidMetadataException(String.format("Invalid table %s. %s", name, e.getMessage()));
}
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : null;
for (String tableName : this.druidClient.getAllDataSources()) {
final QualifiedName qualifiedName =
QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
if (tableFilter == null || tableName.startsWith(tableFilter)) {
qualifiedNames.add(qualifiedName);
}
}
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (Exception exception) {
throw new ConnectorException(String.format("Failed listNames druid table %s", name), exception);
}
}
}
| 9,616 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/LoadSpec.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Builder;
import lombok.Data;
import java.util.List;
/**
 * Load spec.
 *
 * <p>Immutable descriptor of where a druid segment's data is stored, as parsed
 * from the segment metadata's {@code loadSpec} JSON object.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Data
@Builder
public class LoadSpec {
    // Storage type from the loadSpec "type" field.
    private final String type;
    // Storage bucket; empty string when absent from the loadSpec.
    private final String bucket;
    // Object keys split from the comma-separated loadSpec "key" field.
    private final List<String> keys;
    // Uri derived from bucket and first key; empty when either is missing.
    private final String uri;
}
| 9,617 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/Interval.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Data;
import java.time.Instant;
/**
* Interval.
*
* @author zhenl
* @since 1.2.0
*/
@Data
public class Interval implements Comparable<Interval> {
private final Instant start;
private final Instant end;
@Override
public int compareTo(final Interval interval) {
return this.getStart().compareTo(interval.getStart());
}
}
| 9,618 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/Segment.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Data;
/**
 * Druid Segment.
 *
 * <p>One time-chunk of a druid data source, together with the information
 * needed to locate its stored data. Ordered by interval start.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Data
public class Segment implements Comparable<Segment> {
    // Name of the data source this segment belongs to.
    private final String name;
    // Time interval covered by this segment.
    private final Interval interval;
    // Comma-separated dimension names.
    private final String dimensions;
    // Comma-separated metric names.
    private final String metric;
    // Physical storage location descriptor of the segment data.
    private final LoadSpec loadSpec;

    /**
     * Orders segments by their interval (which compares by start instant).
     */
    @Override
    public int compareTo(final Segment segment) {
        return this.getInterval().compareTo(segment.getInterval());
    }
}
| 9,619 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/Database.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
/**
 * Druid Database.
 *
 * <p>There is no equivalent of a database in druid, so this connector exposes a
 * single synthetic database whose name is always 'default'.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Getter
@SuppressFBWarnings
public class Database {
    // Fixed name of the single synthetic druid database ("default").
    private final String name = DruidConfigConstants.DRUID_DB;
}
| 9,620 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/DruidConverterUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import org.apache.commons.lang3.StringUtils;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
 * Druid Converter Util.
 *
 * <p>Static helpers that translate the druid coordinator's segment-metadata
 * JSON into the connector's {@link DataSource} and {@link Segment} model
 * objects.
 *
 * @author zhenl
 * @since 1.2.0
 */
public final class DruidConverterUtil {
    private DruidConverterUtil() {
    }

    /**
     * Builds a data source from the "all segments" JSON of a data source.
     *
     * @param node JSON object with name, properties.created and a segments array
     * @return data source whose segments are sorted by interval start
     */
    public static DataSource getDatasourceFromAllSegmentJsonObject(final ObjectNode node) {
        final Instant createTime = Instant.parse(
            node.get(DruidConfigConstants.PROPERTIES)
                .get(DruidConfigConstants.CREATED).asText());
        final String name = node.get(DruidConfigConstants.NAME).asText();
        final List<Segment> segmentList = new ArrayList<>();
        for (JsonNode segNode : node.get(DruidConfigConstants.SEGMENTS)) {
            segmentList.add(getSegmentFromJsonNode(segNode.deepCopy()));
        }
        // Keep segments ordered so callers can treat the last one as the newest.
        Collections.sort(segmentList);
        return new DataSource(name, createTime, segmentList);
    }

    /**
     * Builds a single-segment data source from the "latest segment" JSON.
     *
     * @param node JSON object describing one segment
     * @return data source wrapping that segment; its create time is the
     *         segment's interval start
     */
    public static DataSource getDatasourceFromLatestSegmentJsonObject(final ObjectNode node) {
        final Segment segment = getSegmentFromJsonNode(node);
        return new DataSource(segment.getName(), segment.getInterval().getStart(), Collections.singletonList(segment));
    }

    /**
     * get segment count.
     *
     * @param node object node
     * @return segment count
     */
    public static int getSegmentCount(final ObjectNode node) {
        return node.get(DruidConfigConstants.SEGMENTS).size();
    }

    // Derives the segment uri as "<bucket>/<key-without-last-path-component>".
    // NOTE(review): assumes key contains at least one '/'; a key without one would
    // make lastIndexOf return -1 and substring throw — confirm the upstream key format.
    private static String getUriFromKey(final String bucket, final String key) {
        return bucket + "/" + key.substring(0, key.lastIndexOf("/"));
    }

    // Parses one segment descriptor. bucket and key can be absent from the load
    // spec, in which case they fall back to empty values and the uri is "".
    private static Segment getSegmentFromJsonNode(final ObjectNode node) {
        final String name = node.get(DruidConfigConstants.DATA_SOURCE).asText();
        final String[] intervalStr = node.get(DruidConfigConstants.INTERVAL).asText().split("/");
        final Interval interval = new Interval(Instant.parse(intervalStr[0]),
            Instant.parse(intervalStr[1]));
        final JsonNode loadspecNode = node.get(DruidConfigConstants.LOADSPEC);
        // Reuse the already-fetched child nodes instead of looking them up twice.
        final JsonNode loadspecNodeBucket = loadspecNode.get(DruidConfigConstants.LOADSPEC_BUCKET);
        final String bucket = loadspecNodeBucket != null ? loadspecNodeBucket.asText() : "";
        final JsonNode loadspecNodeKey = loadspecNode.get(DruidConfigConstants.LOADSPEC_KEY);
        final List<String> keys = loadspecNodeKey != null
            ? Arrays.asList(loadspecNodeKey.asText().split(","))
            : new ArrayList<>();
        final LoadSpec loadSpec = new LoadSpec(loadspecNode.get(DruidConfigConstants.LOADSPEC_TYPE).asText(),
            bucket, keys,
            StringUtils.isEmpty(bucket) || keys.isEmpty() ? "" : getUriFromKey(bucket, keys.get(0))
        );
        final String dimensions = node.get(DruidConfigConstants.DIMENSIONS).asText();
        final String metric = node.get(DruidConfigConstants.METRICS).asText();
        return new Segment(name, interval, dimensions, metric, loadSpec);
    }
}
| 9,621 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/DruidConnectorInfoConverter.java
|
package com.netflix.metacat.connector.druid.converter;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Druid Info Converter.
 *
 * <p>Translates druid model objects ({@link Segment}, {@link DataSource}) into
 * the common Metacat connector info objects.
 *
 * @author zhenl
 * @since 1.2.0
 */
public class DruidConnectorInfoConverter implements ConnectorInfoConverter<Database, Database, Segment> {
    private final String catalogName;

    /**
     * Constructor.
     *
     * @param catalogName catalog Name
     */
    public DruidConnectorInfoConverter(final String catalogName) {
        this.catalogName = catalogName;
    }

    /**
     * Converts a druid segment to a partition info object. The segment's load
     * spec and column lists are carried over as partition metadata.
     *
     * @param segment segment object
     * @return partition info object
     */
    public PartitionInfo getPartitionInfoFromSegment(final Segment segment) {
        final LoadSpec loadSpec = segment.getLoadSpec();
        final Map<String, String> metadata = new HashMap<>();
        metadata.put(DruidConfigConstants.LOADSPEC_KEY, loadSpec.getKeys().toString());
        metadata.put(DruidConfigConstants.LOADSPEC_BUCKET, loadSpec.getBucket());
        metadata.put(DruidConfigConstants.LOADSPEC_TYPE, loadSpec.getType());
        metadata.put(DruidConfigConstants.DIMENSIONS, segment.getDimensions());
        metadata.put(DruidConfigConstants.METRICS, segment.getMetric());
        final StorageInfo storageInfo = StorageInfo.builder().uri(loadSpec.getUri()).build();
        return PartitionInfo.builder().metadata(metadata).serde(storageInfo).build();
    }

    /**
     * Converts a druid data source to a table info object. Columns are taken
     * from the last (newest) segment: dimensions become string fields and
     * metrics become double fields.
     *
     * @param dataSource dataSource object
     * @return table info object
     */
    public TableInfo getTableInfoFromDatasource(final DataSource dataSource) {
        final List<Segment> segments = dataSource.getSegmentList();
        final Segment latestSegment = segments.get(segments.size() - 1);
        final List<FieldInfo> fields = new ArrayList<>();
        for (final String dimension : latestSegment.getDimensions().split(",")) {
            fields.add(FieldInfo.builder()
                .comment(DruidConfigConstants.DIMENSIONS)
                .name(dimension)
                .type(BaseType.STRING)
                .build());
        }
        for (final String metricName : latestSegment.getMetric().split(",")) {
            fields.add(FieldInfo.builder()
                .comment(DruidConfigConstants.METRICS)
                .name(metricName)
                .type(BaseType.DOUBLE)
                .build());
        }
        return TableInfo.builder()
            .fields(fields)
            .name(QualifiedName.ofTable(catalogName, DruidConfigConstants.DRUID_DB, dataSource.getName()))
            .build();
    }
}
| 9,622 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector converter.
*
* @author zhenl
* @since 1.2.0
*
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid.converter;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,623 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/converter/DataSource.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.converter;
import lombok.Data;
import java.time.Instant;
import java.util.List;
/**
* Druid Datasource.
*
* @author zhenl
* @since 1.2.0
*/
@Data
public class DataSource {
private final String name;
private final Instant createTime;
private final List<Segment> segmentList;
}
| 9,624 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/configs/DruidHttpClientConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.util.TimeUtil;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import com.netflix.metacat.connector.druid.MetacatDruidClient;
import com.netflix.metacat.connector.druid.client.DruidHttpClientImpl;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.web.client.RestTemplate;
import java.net.UnknownHostException;
import java.util.concurrent.TimeUnit;
/**
 * DruidHttpClientConfig.
 *
 * <p>Spring configuration wiring the http stack used to talk to the druid
 * coordinator: a pooled apache HttpClient, a RestTemplate on top of it, and the
 * http-based {@link MetacatDruidClient} implementation.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Configuration
public class DruidHttpClientConfig {
    /**
     * Druid client instance.
     *
     * @param connectorContext connector context
     * @param restTemplate rest template
     * @return MetacatDruidClient
     * @throws UnknownHostException exception for unknownhost
     */
    @Bean
    public MetacatDruidClient createMetacatDruidClient(
        final ConnectorContext connectorContext,
        final RestTemplate restTemplate) throws UnknownHostException {
        return new DruidHttpClientImpl(connectorContext, restTemplate);
    }

    /**
     * Rest template backed by the pooled http client bean.
     *
     * @param connectorContext connector context
     * @return RestTemplate
     */
    @Bean
    public RestTemplate restTemplate(final ConnectorContext connectorContext) {
        return new RestTemplate(new HttpComponentsClientHttpRequestFactory(httpClient(connectorContext)));
    }

    /**
     * Pooled http client. Timeouts come from the catalog configuration key
     * {@code http.timeout} (default "5s"), the pool size from {@code pool.size}
     * (default 10).
     *
     * @param connectorContext connector context
     * @return HttpClient
     */
    @Bean
    public HttpClient httpClient(final ConnectorContext connectorContext) {
        final String timeoutSetting =
            connectorContext.getConfiguration().getOrDefault(DruidConfigConstants.HTTP_TIMEOUT, "5s");
        final int timeoutMs = (int) TimeUtil.toTime(timeoutSetting, TimeUnit.SECONDS, TimeUnit.MILLISECONDS);
        final int maxConnections = Integer.parseInt(
            connectorContext.getConfiguration().getOrDefault(DruidConfigConstants.POOL_SIZE, "10"));
        final RequestConfig requestConfig = RequestConfig.custom()
            .setConnectTimeout(timeoutMs)
            .setConnectionRequestTimeout(timeoutMs)
            .setSocketTimeout(timeoutMs)
            .setMaxRedirects(3)
            .build();
        final PoolingHttpClientConnectionManager poolManager = new PoolingHttpClientConnectionManager();
        poolManager.setMaxTotal(maxConnections);
        return HttpClientBuilder
            .create()
            .setDefaultRequestConfig(requestConfig)
            .setConnectionManager(poolManager)
            .build();
    }
}
| 9,625 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/configs/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid.configs;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,626 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/configs/DruidConnectorConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.configs;
import com.netflix.metacat.connector.druid.DruidConnectorDatabaseService;
import com.netflix.metacat.connector.druid.DruidConnectorPartitionService;
import com.netflix.metacat.connector.druid.DruidConnectorTableService;
import com.netflix.metacat.connector.druid.MetacatDruidClient;
import com.netflix.metacat.connector.druid.converter.DruidConnectorInfoConverter;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring configuration that wires up the services backing the Druid connector.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Configuration
public class DruidConnectorConfig {
    /**
     * Creates the table service backed by the Druid coordinator client.
     *
     * @param druidClient client used to talk to the Druid cluster
     * @param druidConnectorInfoConverter converter between Druid payloads and Metacat models
     * @return the Druid table service bean
     */
    @Bean
    public DruidConnectorTableService druidTableService(
        final MetacatDruidClient druidClient,
        final DruidConnectorInfoConverter druidConnectorInfoConverter) {
        final DruidConnectorTableService tableService =
            new DruidConnectorTableService(druidClient, druidConnectorInfoConverter);
        return tableService;
    }

    /**
     * Creates the (stateless) database service bean.
     *
     * @return the Druid database service bean
     */
    @Bean
    public DruidConnectorDatabaseService druidDatabaseService() {
        return new DruidConnectorDatabaseService();
    }

    /**
     * Creates the partition service backed by the Druid coordinator client.
     *
     * @param druidClient client used to talk to the Druid cluster
     * @param druidConnectorInfoConverter converter between Druid payloads and Metacat models
     * @return the Druid partition service bean
     */
    @Bean
    public DruidConnectorPartitionService druidPartitionService(
        final MetacatDruidClient druidClient,
        final DruidConnectorInfoConverter druidConnectorInfoConverter) {
        return new DruidConnectorPartitionService(druidClient, druidConnectorInfoConverter);
    }
}
| 9,627 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/client/DruidHttpClientUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.client;
/**
 * Helper routines for parsing responses from the Druid HTTP API.
 *
 * @author zhenl
 * @since 1.2.0
 */
public final class DruidHttpClientUtil {
    private DruidHttpClientUtil() {
    }

    /**
     * Picks the lexicographically greatest segment id out of a JSON-style array
     * literal such as {@code ["id1","id2",...]}.
     *
     * @param input bracket-wrapped, comma-separated list of quoted segment ids
     * @return the latest (lexicographically greatest) segment id, unquoted
     */
    public static String getLatestSegment(final String input) {
        // Strip the surrounding brackets, then scan the comma-separated entries
        // keeping a running maximum.
        final String body = input.substring(1, input.length() - 1);
        String latest = null;
        for (final String entry : body.split(",")) {
            final String candidate = entry.trim().replace("\"", "");
            // '<= 0' keeps the last occurrence when two ids compare equal.
            if (latest == null || latest.compareTo(candidate) <= 0) {
                latest = candidate;
            }
        }
        return latest;
    }
}
| 9,628 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/client/DruidHttpClientImpl.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.druid.client;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.connector.druid.DruidConfigConstants;
import com.netflix.metacat.connector.druid.MetacatDruidClient;
import lombok.extern.slf4j.Slf4j;
import org.json.JSONArray;
import org.springframework.web.client.RestTemplate;
import javax.annotation.Nullable;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
 * {@link MetacatDruidClient} implementation that talks to the Druid coordinator
 * REST endpoint through a Spring {@link RestTemplate}.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Slf4j
public class DruidHttpClientImpl implements MetacatDruidClient {
    // Coordinator base URI; validated once in the constructor and never
    // reassigned, hence final (was previously a mutable field).
    private final String druidURI;
    private final RestTemplate restTemplate;
    private final MetacatJsonLocator jsonLocator = new MetacatJsonLocator();

    /**
     * Constructor.
     *
     * @param connectorContext connector context supplying the catalog configuration
     * @param restTemplate rest template used for every coordinator call
     * @throws MetacatException if the coordinator URI is absent or not a valid URI
     */
    public DruidHttpClientImpl(final ConnectorContext connectorContext,
                               final RestTemplate restTemplate) {
        this.restTemplate = restTemplate;
        final Map<String, String> config = connectorContext.getConfiguration();
        final String coordinatorUri = config.get(DruidConfigConstants.DRUID_COORDINATOR_URI);
        if (coordinatorUri == null) {
            throw new MetacatException("Druid cluster ending point not provided.");
        }
        try {
            // Constructed only to validate the syntax; the raw string is stored.
            new URI(coordinatorUri);
        } catch (URISyntaxException exception) {
            throw new MetacatException("Druid ending point invalid");
        }
        this.druidURI = coordinatorUri;
        log.info("druid server uri={}", this.druidURI);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<String> getAllDataSources() {
        // The coordinator returns a JSON array of data source names.
        final JSONArray arr = new JSONArray(restTemplate.getForObject(druidURI, String.class));
        return IntStream.range(0, arr.length()).mapToObj(i -> arr.get(i).toString()).collect(Collectors.toList());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @Nullable
    public ObjectNode getAllDataByName(final String dataSourceName) {
        // NOTE: the template placeholder name is misspelled ("datasoureName") but
        // harmless — RestTemplate expands varargs URI variables by position, not name.
        final String result = restTemplate.getForObject(
            druidURI + "/{datasoureName}?full", String.class, dataSourceName);
        return jsonLocator.parseJsonObject(result);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @Nullable
    public ObjectNode getLatestDataByName(final String dataSourceName) {
        // Fetch the full segment list first, then fetch the details of the
        // lexicographically latest segment.
        String url = String.format(druidURI + "/%s/segments", dataSourceName);
        String result = restTemplate.getForObject(url, String.class);
        if (result == null) {
            throw new MetacatException(String.format("Druid cluster: %s result not found.", dataSourceName));
        }
        final String latestSegment = DruidHttpClientUtil.getLatestSegment(result);
        log.debug("Get the latest segment {}", latestSegment);
        url = String.format(druidURI + "/%s/segments/%s", dataSourceName, latestSegment);
        result = restTemplate.getForObject(url, String.class);
        return jsonLocator.parseJsonObject(result);
    }
}
| 9,629 |
0 |
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid
|
Create_ds/metacat/metacat-connector-druid/src/main/java/com/netflix/metacat/connector/druid/client/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* druid connector client.
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.druid.client;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,630 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorFactory.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.persist.PersistService;
import com.google.inject.persist.jpa.JpaPersistModule;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.util.DataSourceManager;
import java.util.Map;
/**
 * Connector factory for the S3 catalog. Builds a Guice injector (JPA plus the
 * S3 module bindings) at construction time and exposes the resulting services.
 */
public class S3ConnectorFactory implements ConnectorFactory {
    private final String catalogName;
    private final String catalogShardName;
    private final Map<String, String> configuration;
    private final S3ConnectorInfoConverter infoConverter;
    private ConnectorDatabaseService databaseService;
    private ConnectorTableService tableService;
    private ConnectorPartitionService partitionService;
    private PersistService persistService;

    /**
     * Constructor.
     * @param catalogName catalog name.
     * @param catalogShardName catalog shard name
     * @param configuration configuration properties
     * @param infoConverter S3 info converter
     */
    public S3ConnectorFactory(final String catalogName, final String catalogShardName,
                              final Map<String, String> configuration,
                              final S3ConnectorInfoConverter infoConverter) {
        Preconditions.checkNotNull(catalogName, "Catalog name is null");
        Preconditions.checkNotNull(catalogShardName, "Catalog shard name is null");
        Preconditions.checkNotNull(configuration, "Catalog connector configuration is null");
        this.catalogName = catalogName;
        this.catalogShardName = catalogShardName;
        this.configuration = configuration;
        this.infoConverter = infoConverter;
        bootstrap();
    }

    /** Builds the Guice injector, starts the JPA layer and resolves the services. */
    private void bootstrap() {
        // Hand the JPA persistence unit the shared data source registered for this shard.
        final Map<String, Object> jpaProperties = Maps.newHashMap(configuration);
        jpaProperties.put("hibernate.connection.datasource",
            DataSourceManager.get().load(catalogShardName, configuration).get(catalogShardName));
        final Module jpaModule = new JpaPersistModule("s3").properties(jpaProperties);
        final Module s3Module = new S3Module(catalogName, configuration, infoConverter);
        final Injector injector = Guice.createInjector(jpaModule, s3Module);
        // JPA must be started before any DAO-backed service can be used.
        persistService = injector.getInstance(PersistService.class);
        persistService.start();
        databaseService = injector.getInstance(ConnectorDatabaseService.class);
        tableService = injector.getInstance(ConnectorTableService.class);
        partitionService = injector.getInstance(ConnectorPartitionService.class);
    }

    @Override
    public ConnectorDatabaseService getDatabaseService() {
        return databaseService;
    }

    @Override
    public ConnectorTableService getTableService() {
        return tableService;
    }

    @Override
    public ConnectorPartitionService getPartitionService() {
        return partitionService;
    }

    @Override
    public String getCatalogName() {
        return catalogName;
    }

    @Override
    public String getCatalogShardName() {
        return catalogShardName;
    }

    @Override
    public void stop() {
        // Shuts down the JPA layer started in bootstrap().
        persistService.stop();
    }
}
| 9,631 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3Module.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3;
import com.google.inject.Binder;
import com.google.inject.Module;
import com.google.inject.Scopes;
import com.google.inject.name.Names;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.dao.FieldDao;
import com.netflix.metacat.connector.s3.dao.PartitionDao;
import com.netflix.metacat.connector.s3.dao.SourceDao;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.dao.impl.DatabaseDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.FieldDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.PartitionDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.SourceDaoImpl;
import com.netflix.metacat.connector.s3.dao.impl.TableDaoImpl;
import java.util.Map;
/**
 * Guice bindings for the S3 connector: converters, connector services and DAOs.
 */
public class S3Module implements Module {
    private final String catalogName;
    private final Map<String, String> configuration;
    private final S3ConnectorInfoConverter infoConverter;

    /**
     * Constructor.
     * @param catalogName catalog name.
     * @param configuration configuration properties
     * @param infoConverter S3 info converter
     */
    public S3Module(final String catalogName, final Map<String, String> configuration,
                    final S3ConnectorInfoConverter infoConverter) {
        this.catalogName = catalogName;
        this.configuration = configuration;
        this.infoConverter = infoConverter;
    }

    @Override
    public void configure(final Binder binder) {
        // Shared constants and the converter (bound under both its interface
        // and its concrete type).
        binder.bind(String.class).annotatedWith(Names.named("catalogName")).toInstance(catalogName);
        binder.bind(ConnectorInfoConverter.class).toInstance(infoConverter);
        binder.bind(S3ConnectorInfoConverter.class).toInstance(infoConverter);
        // Connector services: one instance each for the lifetime of the injector.
        binder.bind(ConnectorDatabaseService.class).to(S3ConnectorDatabaseService.class).in(Scopes.SINGLETON);
        binder.bind(ConnectorTableService.class).to(S3ConnectorTableService.class).in(Scopes.SINGLETON);
        binder.bind(ConnectorPartitionService.class).to(S3ConnectorPartitionService.class).in(Scopes.SINGLETON);
        // DAO bindings; default (per-injection) scope is sufficient.
        binder.bind(DatabaseDao.class).to(DatabaseDaoImpl.class);
        binder.bind(TableDao.class).to(TableDaoImpl.class);
        binder.bind(PartitionDao.class).to(PartitionDaoImpl.class);
        binder.bind(FieldDao.class).to(FieldDaoImpl.class);
        binder.bind(SourceDao.class).to(SourceDaoImpl.class);
    }
}
| 9,632 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorTableService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.inject.persist.Transactional;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.dao.FieldDao;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.model.Database;
import com.netflix.metacat.connector.s3.model.Field;
import com.netflix.metacat.connector.s3.model.Info;
import com.netflix.metacat.connector.s3.model.Location;
import com.netflix.metacat.connector.s3.model.Schema;
import com.netflix.metacat.connector.s3.model.Table;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * S3 Connector implementation for tables.
 *
 * <p>All lookups are keyed by the catalog (source) name plus database/table
 * names through the injected DAOs. Methods run inside a Guice-persist
 * transaction (class-level {@code @Transactional}).
 *
 * @author amajumdar
 */
@Transactional
@Slf4j
public class S3ConnectorTableService implements ConnectorTableService {
    private final DatabaseDao databaseDao;
    private final TableDao tableDao;
    private final FieldDao fieldDao;
    private final S3ConnectorInfoConverter infoConverter;
    private final String catalogName;
    /**
     * Constructor.
     *
     * @param catalogName catalog name
     * @param databaseDao database DAO impl
     * @param tableDao table DAO impl
     * @param fieldDao field DAO impl
     * @param infoConverter Converter for the S3 resources
     */
    @Inject
    public S3ConnectorTableService(@Named("catalogName") final String catalogName,
                                   final DatabaseDao databaseDao,
                                   final TableDao tableDao,
                                   final FieldDao fieldDao,
                                   final S3ConnectorInfoConverter infoConverter) {
        this.catalogName = catalogName;
        this.databaseDao = databaseDao;
        this.tableDao = tableDao;
        this.fieldDao = fieldDao;
        this.infoConverter = infoConverter;
    }
    /**
     * Creates the given table in the parent database.
     *
     * @param context request context
     * @param tableInfo table metadata to persist
     * @throws TableAlreadyExistsException if a table with the same name exists
     * @throws DatabaseNotFoundException if the parent database does not exist
     * @throws IllegalArgumentException if a serde is supplied without an owner
     */
    @Override
    public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
        log.debug("Start: Create table {}", tableInfo.getName());
        // An owner is mandatory whenever a serde block is supplied.
        Preconditions.checkArgument(tableInfo.getSerde() == null
            || !Strings.isNullOrEmpty(tableInfo.getSerde().getOwner()), "Table owner is null or empty");
        final QualifiedName tableName = tableInfo.getName();
        if (tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(),
            tableName.getTableName()) != null) {
            throw new TableAlreadyExistsException(tableName);
        }
        final Database database = databaseDao
            .getBySourceDatabaseName(catalogName, tableName.getDatabaseName());
        if (database == null) {
            throw new DatabaseNotFoundException(QualifiedName.ofDatabase(catalogName, tableName.getDatabaseName()));
        }
        tableDao.save(infoConverter.fromTableInfo(database, tableInfo));
        log.debug("End: Create table {}", tableInfo.getName());
    }
    /**
     * Updates an existing table. The uri and serde info are merged attribute by
     * attribute (only non-null values on the update overwrite the stored ones),
     * while a non-empty incoming field list fully replaces the existing fields.
     *
     * @param context request context
     * @param tableInfo table metadata carrying the changes
     * @throws TableNotFoundException if the table does not exist
     */
    @Override
    public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
        log.debug("Start: Update table {}", tableInfo.getName());
        final QualifiedName tableName = tableInfo.getName();
        final Table table = tableDao
            .getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(), tableName.getTableName());
        if (table == null) {
            throw new TableNotFoundException(tableName);
        }
        //we can update the fields, the uri, or the full serde
        final Location newLocation = infoConverter.toLocation(tableInfo);
        Location location = table.getLocation();
        if (location == null) {
            // First update for a table persisted without a location: create one
            // and link both sides of the association.
            location = new Location();
            location.setTable(table);
            table.setLocation(location);
        }
        if (newLocation.getUri() != null) {
            location.setUri(newLocation.getUri());
        }
        final Info newInfo = newLocation.getInfo();
        if (newInfo != null) {
            final Info info = location.getInfo();
            if (info == null) {
                // No existing serde info: attach the incoming one wholesale.
                location.setInfo(newInfo);
                newInfo.setLocation(location);
            } else {
                // Merge: only attributes explicitly present on the update win.
                if (newInfo.getInputFormat() != null) {
                    info.setInputFormat(newInfo.getInputFormat());
                }
                if (newInfo.getOutputFormat() != null) {
                    info.setOutputFormat(newInfo.getOutputFormat());
                }
                if (newInfo.getOwner() != null) {
                    info.setOwner(newInfo.getOwner());
                }
                if (newInfo.getSerializationLib() != null) {
                    info.setSerializationLib(newInfo.getSerializationLib());
                }
                if (newInfo.getParameters() != null && !newInfo.getParameters().isEmpty()) {
                    info.setParameters(newInfo.getParameters());
                }
            }
        }
        final Schema newSchema = newLocation.getSchema();
        if (newSchema != null) {
            final List<Field> newFields = newSchema.getFields();
            if (newFields != null && !newFields.isEmpty()) {
                final Schema schema = location.getSchema();
                if (schema == null) {
                    // No existing schema: attach the incoming one wholesale.
                    location.setSchema(newSchema);
                    newSchema.setLocation(location);
                } else {
                    final List<Field> fields = schema.getFields();
                    if (fields.isEmpty()) {
                        // Existing schema has no fields: adopt the new ones.
                        newFields.forEach(field -> {
                            field.setSchema(schema);
                            fields.add(field);
                        });
                    } else {
                        // Re-key the incoming fields to this schema, using list
                        // order as the position and defaulting the type from
                        // the source type when absent.
                        for (int i = 0; i < newFields.size(); i++) {
                            final Field newField = newFields.get(i);
                            newField.setPos(i);
                            newField.setSchema(schema);
                            if (newField.getType() == null) {
                                newField.setType(newField.getSourceType());
                            }
                        }
                        // NOTE(review): the detach -> delete -> flush -> attach
                        // ordering below looks deliberate (presumably so the ORM
                        // does not see old and new field rows at once) — preserve
                        // this exact order.
                        schema.setFields(null);
                        fieldDao.delete(fields);
                        tableDao.save(table, true);
                        schema.setFields(newFields);
                    }
                }
            }
        }
        log.debug("End: Update table {}", tableInfo.getName());
    }
    /**
     * Deletes the named table.
     *
     * @param context request context
     * @param name qualified table name
     * @throws TableNotFoundException if the table does not exist
     */
    @Override
    public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        log.debug("Start: Delete table {}", name);
        final Table table = tableDao.getBySourceDatabaseTableName(catalogName,
            name.getDatabaseName(), name.getTableName());
        if (table == null) {
            throw new TableNotFoundException(name);
        }
        tableDao.delete(table);
        log.debug("End: Delete table {}", name);
    }
    /**
     * Returns the table metadata for the given name.
     *
     * @param context request context
     * @param name qualified table name
     * @return the converted table info
     * @throws TableNotFoundException if the table does not exist
     */
    @Override
    public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        final Table table = tableDao.getBySourceDatabaseTableName(catalogName,
            name.getDatabaseName(), name.getTableName());
        if (table == null) {
            throw new TableNotFoundException(name);
        }
        log.debug("Get table {}", name);
        return infoConverter.toTableInfo(name, table);
    }
    /**
     * Returns true if the named table exists.
     *
     * @param context request context
     * @param name qualified table name
     * @return whether the table exists
     */
    @Override
    public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        return tableDao.getBySourceDatabaseTableName(catalogName, name.getDatabaseName(), name.getTableName()) != null;
    }
    /**
     * Lists tables of a database, optionally filtered by a table-name prefix,
     * sorted and paged.
     *
     * @param context request context
     * @param name qualified database name
     * @param prefix optional table-name prefix filter
     * @param sort optional sort
     * @param pageable optional paging
     * @return matching tables as table infos
     */
    @Override
    public List<TableInfo> list(@Nonnull final ConnectorRequestContext context,
                                @Nonnull final QualifiedName name,
                                @Nullable final QualifiedName prefix,
                                @Nullable final Sort sort,
                                @Nullable final Pageable pageable) {
        log.debug("List tables for database {} with table name prefix {}", name, prefix);
        return tableDao.searchBySourceDatabaseTableName(catalogName, name.getDatabaseName(),
            prefix == null ? null : prefix.getTableName(), sort, pageable).stream()
            .map(t -> infoConverter.toTableInfo(QualifiedName.ofTable(catalogName, name.getDatabaseName(), t.getName()),
                t)).collect(Collectors.toList());
    }
    /**
     * Lists qualified table names of a database, optionally filtered by a
     * table-name prefix, sorted and paged.
     *
     * @param context request context
     * @param name qualified database name
     * @param prefix optional table-name prefix filter
     * @param sort optional sort
     * @param pageable optional paging
     * @return matching qualified table names
     */
    @Override
    public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
                                         @Nonnull final QualifiedName name,
                                         @Nullable final QualifiedName prefix,
                                         @Nullable final Sort sort,
                                         @Nullable final Pageable pageable) {
        log.debug("List table names for database {} with table name prefix {}", name, prefix);
        return tableDao.searchBySourceDatabaseTableName(catalogName, name.getDatabaseName(),
            prefix == null ? null : prefix.getTableName(), sort, pageable).stream()
            .map(t -> QualifiedName.ofTable(catalogName, name.getDatabaseName(), t.getName()))
            .collect(Collectors.toList());
    }
    /**
     * Renames a table within the catalog.
     *
     * @param context request context
     * @param oldName current qualified table name
     * @param newName desired qualified table name
     * @throws TableNotFoundException if the source table does not exist
     * @throws TableAlreadyExistsException if the target name is already taken
     */
    @Override
    public void rename(@Nonnull final ConnectorRequestContext context,
                       @Nonnull final QualifiedName oldName,
                       @Nonnull final QualifiedName newName) {
        log.debug("Start: Rename table {} with {}", oldName, newName);
        final Table oldTable = tableDao.getBySourceDatabaseTableName(catalogName,
            oldName.getDatabaseName(), oldName.getTableName());
        if (oldTable == null) {
            throw new TableNotFoundException(oldName);
        }
        final Table newTable = tableDao.getBySourceDatabaseTableName(catalogName,
            newName.getDatabaseName(), newName.getTableName());
        if (newTable == null) {
            // NOTE: only the table name is changed; the database component of
            // newName is not applied, so this is a rename within the database.
            oldTable.setName(newName.getTableName());
            tableDao.save(oldTable);
        } else {
            throw new TableAlreadyExistsException(newName);
        }
        log.debug("End: Rename table {} with {}", oldName, newName);
    }
    /**
     * Returns the qualified table names that use the given uris, keyed by uri.
     *
     * @param context request context
     * @param uris uris to look up
     * @param prefixSearch whether to match uris by prefix rather than equality
     * @return map of uri to the qualified names of tables located there
     */
    @Override
    public Map<String, List<QualifiedName>> getTableNames(@Nonnull final ConnectorRequestContext context,
                                                          @Nonnull final List<String> uris,
                                                          final boolean prefixSearch) {
        return tableDao.getByUris(catalogName, uris, prefixSearch);
    }
}
| 9,633 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorInfoConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeManager;
import com.netflix.metacat.common.type.TypeSignature;
import com.netflix.metacat.connector.pig.converters.PigTypeConverter;
import com.netflix.metacat.connector.s3.model.Database;
import com.netflix.metacat.connector.s3.model.Field;
import com.netflix.metacat.connector.s3.model.Info;
import com.netflix.metacat.connector.s3.model.Location;
import com.netflix.metacat.connector.s3.model.Partition;
import com.netflix.metacat.connector.s3.model.Schema;
import com.netflix.metacat.connector.s3.model.Source;
import com.netflix.metacat.connector.s3.model.Table;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Converts S3 model objects to Metacat DTOs and vice versa.
* @author amajumdar
*/
public class S3ConnectorInfoConverter implements ConnectorInfoConverter<Database, Table, Partition> {
private final PigTypeConverter pigTypeConverter;
private final boolean isUsePigTypes;
private final TypeManager typeManager;
/**
* Constructor.
* @param pigTypeConverter Type converter for PIG
* @param isUsePigTypes true, if we need to use pig type converter
* @param typeManager Type manager
*/
public S3ConnectorInfoConverter(final PigTypeConverter pigTypeConverter, final boolean isUsePigTypes,
final TypeManager typeManager) {
this.pigTypeConverter = pigTypeConverter;
this.isUsePigTypes = isUsePigTypes;
this.typeManager = typeManager;
}
@Override
public DatabaseInfo toDatabaseInfo(final QualifiedName catalogName, final Database database) {
final AuditInfo auditInfo = AuditInfo.builder().createdDate(database.getCreatedDate())
.lastModifiedDate(database.getLastUpdatedDate()).build();
return DatabaseInfo.builder().name(QualifiedName.ofDatabase(catalogName.getCatalogName(), database.getName()))
.auditInfo(auditInfo).build();
}
@Override
public Database fromDatabaseInfo(final DatabaseInfo databaseInfo) {
final Database result = new Database();
final QualifiedName databaseName = databaseInfo.getName();
result.setName(databaseName.getDatabaseName());
final Source source = new Source();
source.setName(databaseName.getCatalogName());
result.setSource(source);
return result;
}
@Override
public TableInfo toTableInfo(final QualifiedName tableName, final Table table) {
return TableInfo.builder().name(tableName).fields(toFields(table)).auditInfo(toAuditInfo(table))
.serde(toStorageInfo(table)).build();
}
private List<FieldInfo> toFields(final Table table) {
List<FieldInfo> result = Lists.newArrayList();
final Location location = table.getLocation();
if (location != null) {
final Schema schema = location.getSchema();
if (schema != null) {
result = schema.getFields().stream().sorted(Comparator.comparing(Field::getPos))
.map(this::toFieldInfo).collect(Collectors.toList());
}
}
return result;
}
@Override
public Table fromTableInfo(final TableInfo tableInfo) {
final Table result = new Table();
result.setName(tableInfo.getName().getTableName());
final Location location = toLocation(tableInfo);
if (location != null) {
result.setLocation(location);
location.setTable(result);
}
return result;
}
/**
* Creates the s3 table.
* @param database s3 database
* @param tableInfo table info
* @return s3 table
*/
public Table fromTableInfo(final Database database, final TableInfo tableInfo) {
final Table result = fromTableInfo(tableInfo);
result.setDatabase(database);
return result;
}
    /**
     * {@inheritDoc}
     */
    @Override
    public PartitionInfo toPartitionInfo(final TableInfo tableInfo, final Partition partition) {
        final QualifiedName tableName = tableInfo.getName();
        // NOTE(review): this reuses the table's own StorageInfo instance and
        // mutates it (setUri below changes tableInfo.getSerde() in place) —
        // confirm callers do not rely on the table's original uri afterwards.
        final StorageInfo storageInfo = tableInfo.getSerde();
        storageInfo.setUri(partition.getUri());
        final AuditInfo auditInfo = AuditInfo.builder().createdDate(partition.getCreatedDate())
            .lastModifiedDate(partition.getLastUpdatedDate())
            .build();
        // Inherit created-by / modified-by from the table's audit when present.
        final AuditInfo tableAuditInfo = tableInfo.getAudit();
        if (tableAuditInfo != null) {
            auditInfo.setCreatedBy(tableAuditInfo.getCreatedBy());
            auditInfo.setLastModifiedBy(tableAuditInfo.getLastModifiedBy());
        }
        return PartitionInfo.builder()
            .name(QualifiedName.ofPartition(tableName.getCatalogName(),
                tableName.getDatabaseName(), tableName.getTableName(), partition.getName()))
            .serde(storageInfo)
            .auditInfo(auditInfo)
            .build();
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Partition fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partitionInfo) {
        // The table is not needed to build an S3 partition; delegate to the
        // single-argument form.
        return fromPartitionInfo(partitionInfo);
    }
/**
* Converts from partition info to s3 partition object.
* @param partitionInfo partition info
* @return s3 partition
*/
Partition fromPartitionInfo(final PartitionInfo partitionInfo) {
final Partition result = new Partition();
result.setName(partitionInfo.getName().getPartitionName());
result.setUri(partitionInfo.getSerde().getUri());
final AuditInfo auditInfo = partitionInfo.getAudit();
if (auditInfo != null) {
result.setCreatedDate(auditInfo.getCreatedDate());
result.setLastUpdatedDate(auditInfo.getLastModifiedDate());
}
return result;
}
/**
* Returns a partition info.
* @param tableName table name
* @param table s3 table
* @param partition partition
* @return partition info
*/
PartitionInfo toPartitionInfo(final QualifiedName tableName, final Table table, final Partition partition) {
final StorageInfo storageInfo = toStorageInfo(table);
storageInfo.setUri(partition.getUri());
final AuditInfo auditInfo = AuditInfo.builder().createdDate(partition.getCreatedDate())
.lastModifiedDate(partition.getLastUpdatedDate())
.build();
final AuditInfo tableAuditInfo = toAuditInfo(table);
if (tableAuditInfo != null) {
auditInfo.setCreatedBy(tableAuditInfo.getCreatedBy());
auditInfo.setLastModifiedBy(tableAuditInfo.getLastModifiedBy());
}
return PartitionInfo.builder()
.name(QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(), tableName.getTableName(), partition.getName()))
.serde(storageInfo)
.auditInfo(auditInfo)
.build();
}
/**
* Converts from s3 table info to storage info.
* @param table table info
* @return table info
*/
StorageInfo toStorageInfo(final Table table) {
StorageInfo result = null;
final Location location = table.getLocation();
if (location != null) {
final Map<String, String> infoParameters = Maps.newHashMap();
result = new StorageInfo();
result.setUri(location.getUri());
final Info info = location.getInfo();
if (info != null) {
result.setOwner(info.getOwner());
result.setInputFormat(info.getInputFormat());
result.setOutputFormat(info.getOutputFormat());
result.setSerializationLib(info.getSerializationLib());
if (info.getParameters() != null) {
infoParameters.putAll(info.getParameters());
}
}
result.setSerdeInfoParameters(infoParameters);
result.setParameters(Maps.newHashMap());
}
return result;
}
/**
* Gets the owner for the given table.
* @param table table info
* @return owner name
*/
public String getOwner(final Table table) {
String result = null;
final Location location = table.getLocation();
if (location != null) {
final Info info = location.getInfo();
if (info != null) {
result = info.getOwner();
}
}
return result;
}
/**
* Converts from storage info to s3 location.
* @param storageInfo storage info
* @return location
*/
Location fromStorageInfo(final StorageInfo storageInfo) {
final Location result = new Location();
if (storageInfo != null) {
result.setUri(storageInfo.getUri());
final Info info = new Info();
info.setLocation(result);
info.setOwner(storageInfo.getOwner());
info.setInputFormat(storageInfo.getInputFormat());
info.setOutputFormat(storageInfo.getOutputFormat());
info.setSerializationLib(storageInfo.getSerializationLib());
final Map<String, String> parameters = Maps.newHashMap();
if (storageInfo.getParameters() != null) {
parameters.putAll(storageInfo.getParameters());
}
if (storageInfo.getSerdeInfoParameters() != null) {
parameters.putAll(storageInfo.getSerdeInfoParameters());
}
info.setParameters(parameters);
result.setInfo(info);
}
return result;
}
/**
* Creates list of fields from table info.
* @param tableInfo table info
* @param schema schema
* @return list of fields
*/
public List<Field> toFields(final TableInfo tableInfo, final Schema schema) {
final ImmutableList.Builder<Field> columns = ImmutableList.builder();
int index = 0;
for (FieldInfo fieldInfo : tableInfo.getFields()) {
final Field field = toField(fieldInfo);
field.setPos(index++);
field.setSchema(schema);
columns.add(field);
}
return columns.build();
}
/**
* Converts from column metadata to field.
* @param fieldInfo column
* @return field
*/
public Field toField(final FieldInfo fieldInfo) {
final Field result = new Field();
result.setName(fieldInfo.getName());
result.setPartitionKey(fieldInfo.isPartitionKey());
result.setComment(fieldInfo.getComment());
result.setSourceType(fieldInfo.getSourceType());
result.setType(toTypeString(fieldInfo.getType()));
return result;
}
/**
* Converts from column metadata to field.
* @param field column
* @return field info
*/
public FieldInfo toFieldInfo(final Field field) {
return FieldInfo.builder().name(field.getName()).partitionKey(field.isPartitionKey())
.comment(field.getComment()).sourceType(field.getSourceType()).type(toType(field.getType())).build();
}
private String toTypeString(final Type type) {
String result = null;
if (isUsePigTypes) {
result = pigTypeConverter.fromMetacatType(type);
} else {
result = type.getDisplayName();
}
return result;
}
/**
* Converts from type string to Metacat type.
* @param type type
* @return Type
*/
public Type toType(final String type) {
Type result = null;
if (isUsePigTypes) {
//Hack for now. We need to correct the type format in Franklin
String typeString = type;
if ("map".equals(type)) {
typeString = "map[]";
}
result = pigTypeConverter.toMetacatType(typeString);
} else {
result = typeManager.getType(TypeSignature.parseTypeSignature(type));
}
return result;
}
/**
* Creates audit info from s3 table info.
* @param table table info
* @return audit info
*/
public AuditInfo toAuditInfo(final Table table) {
final AuditInfo result = AuditInfo.builder().createdDate(table.getCreatedDate())
.lastModifiedDate(table.getLastUpdatedDate()).build();
final Location location = table.getLocation();
if (location != null) {
final Info info = location.getInfo();
if (info != null) {
result.setCreatedBy(info.getOwner());
result.setLastModifiedBy(info.getOwner());
}
}
return result;
}
/**
* Creates location.
* @param tableInfo table info
* @return location
*/
public Location toLocation(final TableInfo tableInfo) {
final Location location = fromStorageInfo(tableInfo.getSerde());
final Schema schema = new Schema();
schema.setLocation(location);
schema.setFields(toFields(tableInfo, schema));
location.setSchema(schema);
return location;
}
/**
* Creates s3 partition.
* @param table table
* @param partitionInfo partition info
* @return partition
*/
public Partition toPartition(final Table table, final PartitionInfo partitionInfo) {
final Partition result = fromPartitionInfo(partitionInfo);
result.setTable(table);
return result;
}
/**
* Gets the partition uri.
* @param partitionInfo partition
* @return uri
*/
public String getUri(final PartitionInfo partitionInfo) {
return partitionInfo.getSerde() == null ? null : partitionInfo.getSerde().getUri();
}
/**
* Gets the partition keys for the given table.
* @param table table info
* @return list of keys
*/
public List<String> partitionKeys(final Table table) {
List<String> result = Lists.newArrayList();
if (table.getLocation() != null) {
final Schema schema = table.getLocation().getSchema();
if (schema != null) {
final List<Field> fields = schema.getFields();
result = fields.stream().filter(Field::isPartitionKey).map(Field::getName).collect(Collectors.toList());
}
}
return result;
}
}
| 9,634 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.inject.persist.Transactional;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.dao.SourceDao;
import com.netflix.metacat.connector.s3.model.Database;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import java.util.List;
import java.util.stream.Collectors;
/**
 * S3 Connector Database Service implementation.
 *
 * @author amajumdar
 */
@Transactional
@Slf4j
public class S3ConnectorDatabaseService implements ConnectorDatabaseService {
    private final SourceDao sourceDao;
    private final DatabaseDao databaseDao;
    private final S3ConnectorInfoConverter infoConverter;
    private final String catalogName;

    /**
     * Constructor.
     *
     * @param catalogName catalog name
     * @param databaseDao database DAO impl
     * @param sourceDao catalog/source DAO impl
     * @param infoConverter Converter for the S3 resources
     */
    @Inject
    public S3ConnectorDatabaseService(@Named("catalogName") final String catalogName, final DatabaseDao databaseDao,
        final SourceDao sourceDao, final S3ConnectorInfoConverter infoConverter) {
        this.catalogName = catalogName;
        this.databaseDao = databaseDao;
        this.sourceDao = sourceDao;
        this.infoConverter = infoConverter;
    }

    @Override
    public List<QualifiedName> listViewNames(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName databaseName) {
        // The S3 model has no concept of views.
        return Lists.newArrayList();
    }

    @Override
    public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo databaseInfo) {
        final QualifiedName qualifiedName = databaseInfo.getName();
        final String databaseName = qualifiedName.getDatabaseName();
        log.debug("Start: Create database {}", qualifiedName);
        Preconditions.checkNotNull(databaseName, "Database name is null");
        // Reject the create when a database with the same name already exists
        // under this catalog/source.
        if (databaseDao.getBySourceDatabaseName(catalogName, databaseName) != null) {
            log.warn("Database {} already exists", databaseName);
            throw new DatabaseAlreadyExistsException(qualifiedName);
        }
        final Database database = new Database();
        database.setName(databaseName);
        database.setSource(sourceDao.getByName(catalogName));
        databaseDao.save(database);
        log.debug("End: Create database {}", qualifiedName);
    }

    @Override
    public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo databaseInfo) {
        // Databases carry no mutable metadata in the S3 model; nothing to update.
    }

    @Override
    public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        log.debug("Start: Delete database {}", name);
        final String databaseName = name.getDatabaseName();
        Preconditions.checkNotNull(databaseName, "Database name is null");
        final Database database = databaseDao.getBySourceDatabaseName(catalogName, databaseName);
        if (database == null) {
            throw new DatabaseNotFoundException(name);
        }
        // Only an empty database may be dropped.
        if (database.getTables() != null && !database.getTables().isEmpty()) {
            throw new ConnectorException("Database " + databaseName + " is not empty. One or more tables exist.", null);
        }
        databaseDao.delete(database);
        log.debug("End: Delete database {}", name);
    }

    @Override
    public DatabaseInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        final String databaseName = name.getDatabaseName();
        Preconditions.checkNotNull(databaseName, "Database name is null");
        log.debug("Get database {}", name);
        final Database database = databaseDao.getBySourceDatabaseName(catalogName, databaseName);
        if (database == null) {
            throw new DatabaseNotFoundException(name);
        }
        return infoConverter.toDatabaseInfo(QualifiedName.ofCatalog(catalogName), database);
    }

    @Override
    public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        return databaseDao.getBySourceDatabaseName(catalogName, name.getDatabaseName()) != null;
    }

    @Override
    public List<DatabaseInfo> list(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable) {
        log.debug("List databases for catalog {} and database with prefix {}", name, prefix);
        final String namePrefix = prefix == null ? "" : prefix.getTableName();
        return databaseDao.searchBySourceDatabaseName(catalogName, namePrefix, sort, pageable)
            .stream()
            .map(database -> infoConverter.toDatabaseInfo(name, database))
            .collect(Collectors.toList());
    }

    @Override
    public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable) {
        log.debug("List database names for catalog {} and database with prefix {}", name, prefix);
        final String namePrefix = prefix == null ? "" : prefix.getTableName();
        return databaseDao.searchBySourceDatabaseName(catalogName, namePrefix, sort, pageable)
            .stream()
            .map(database -> QualifiedName.ofDatabase(catalogName, database.getName()))
            .collect(Collectors.toList());
    }

    @Override
    public void rename(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName oldName,
        @Nonnull final QualifiedName newName) {
        log.debug("Start: Rename database {} with {}", oldName, newName);
        final String newDatabaseName = newName.getDatabaseName();
        Preconditions.checkNotNull(newDatabaseName, "Database name is null");
        final Database oldDatabase = databaseDao.getBySourceDatabaseName(catalogName, oldName.getDatabaseName());
        if (oldDatabase == null) {
            throw new DatabaseNotFoundException(oldName);
        }
        // The target name must be free before renaming.
        if (databaseDao.getBySourceDatabaseName(catalogName, newDatabaseName) != null) {
            throw new DatabaseAlreadyExistsException(newName);
        }
        oldDatabase.setName(newDatabaseName);
        databaseDao.save(oldDatabase);
        log.debug("End: Rename database {} with {}", oldName, newName);
    }
}
| 9,635 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorPartitionService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.s3;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.inject.persist.Transactional;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.model.BaseInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.s3.dao.PartitionDao;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.model.Partition;
import com.netflix.metacat.connector.s3.model.Table;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import java.io.StringReader;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * S3 Connector implementation for partitions.
 *
 * @author amajumdar
 */
@Transactional
@Slf4j
public class S3ConnectorPartitionService implements ConnectorPartitionService {
    // Pseudo-columns recognized inside filter expressions. They are not real
    // partition keys: they are evaluated against partition metadata instead.
    private static final String FIELD_DATE_CREATED = "dateCreated";
    private static final String FIELD_BATCHID = "batchid";
    private final TableDao tableDao;
    private final PartitionDao partitionDao;
    private final S3ConnectorInfoConverter infoConverter;
    private final String catalogName;
    /**
     * Constructor.
     *
     * @param catalogName catalog name
     * @param tableDao table DAO impl
     * @param partitionDao partition DAO impl
     * @param infoConverter Converter for the S3 resources
     */
    @Inject
    public S3ConnectorPartitionService(@Named("catalogName") final String catalogName, final TableDao tableDao,
        final PartitionDao partitionDao, final S3ConnectorInfoConverter infoConverter) {
        this.tableDao = tableDao;
        this.partitionDao = partitionDao;
        this.infoConverter = infoConverter;
        this.catalogName = catalogName;
    }
    @Override
    public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final PartitionInfo partitionInfo) {
        final QualifiedName name = partitionInfo.getName();
        log.debug("Start: Create partition {}", name);
        final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(),
            name.getTableName());
        // Table
        final Table table = getTable(tableName);
        // Creating an already-existing partition is an error (use savePartitions
        // for upsert semantics).
        final List<Partition> partitions = partitionDao
            .getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
        if (!partitions.isEmpty()) {
            throw new PartitionAlreadyExistsException(tableName, name.getPartitionName());
        }
        partitionDao.save(infoConverter.toPartition(table, partitionInfo));
        log.debug("End: Create partition {}", name);
    }
    // Loads the table entity for the given name, failing fast when it does not exist.
    private Table getTable(final QualifiedName tableName) {
        final Table result = tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(),
            tableName.getTableName());
        if (result == null) {
            throw new TableNotFoundException(tableName);
        }
        return result;
    }
    @Override
    public List<PartitionInfo> getPartitions(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName tableName,
        @Nonnull final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo) {
        log.debug("Get partitions for table {}", tableName);
        return _getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
            partitionsRequest.getSort(), partitionsRequest.getPageable(), true);
    }
    @Override
    public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final PartitionInfo partitionInfo) {
        final QualifiedName name = partitionInfo.getName();
        log.debug("Start: Update partition {}", name);
        final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(),
            name.getTableName());
        // Table
        final Table table = getTable(tableName);
        // Updating a partition that does not exist is an error.
        final List<Partition> partitions = partitionDao
            .getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
        if (partitions.isEmpty()) {
            throw new PartitionNotFoundException(tableName, name.getPartitionName());
        }
        partitionDao.save(infoConverter.fromPartitionInfo(partitionInfo));
        log.debug("End: Update partition {}", name);
    }
    @Override
    public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        log.debug("Start: Delete partition {}", name);
        partitionDao.deleteByNames(catalogName, name.getDatabaseName(), name.getTableName(),
            Lists.newArrayList(name.getPartitionName()));
        log.debug("End: Delete partition {}", name);
    }
    /**
     * Upserts the given partitions: new names are inserted, existing names have
     * their uri updated only when it changed, and names listed for deletion are
     * removed, all in one transaction.
     */
    @Override
    public PartitionsSaveResponse savePartitions(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName tableName,
        @Nonnull final PartitionsSaveRequest partitionsSaveRequest) {
        log.debug("Start: Save partitions for table {}", tableName);
        // Table
        final Table table = getTable(tableName);
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        //
        Map<String, Partition> existingPartitionMap = Maps.newHashMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            // Validate each partition name against the table's partition keys and
            // prefetch the already-stored partitions in one lookup.
            final List<String> partitionNames = partitionsSaveRequest.getPartitions().stream().map(
                partition -> {
                    final String partitionName = partition.getName().getPartitionName();
                    PartitionUtil.validatePartitionName(partitionName, infoConverter.partitionKeys(table));
                    return partitionName;
                }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
        }
        // New partitions
        final List<Partition> s3Partitions = Lists.newArrayList();
        for (PartitionInfo partition : partitionsSaveRequest.getPartitions()) {
            final String partitionName = partition.getName().getPartitionName();
            final Partition s3Partition = existingPartitionMap.get(partitionName);
            if (s3Partition == null) {
                addedPartitionIds.add(partitionName);
                s3Partitions.add(infoConverter.toPartition(table, partition));
            } else {
                // Existing partition: persist only when the uri actually changed.
                final String partitionUri = infoConverter.getUri(partition);
                final String s3PartitionUri = s3Partition.getUri();
                if (partitionUri != null && !partitionUri.equals(s3PartitionUri)) {
                    s3Partition.setUri(partitionUri);
                    existingPartitionIds.add(partitionName);
                    s3Partitions.add(s3Partition);
                }
            }
        }
        // Requested deletes are applied before saving the adds/updates.
        final List<String> partitionIdsForDeletes = partitionsSaveRequest.getPartitionIdsForDeletes();
        if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
            partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(),
                partitionIdsForDeletes);
        }
        partitionDao.save(s3Partitions);
        log.debug("End: Save partitions for table {}", tableName);
        return PartitionsSaveResponse.builder().added(addedPartitionIds).updated(existingPartitionIds).build();
    }
    // Fetches the stored partitions for the given names, keyed by partition name.
    // NOTE(review): Collectors.toMap throws on duplicate keys — assumes partition
    // names are unique within a table; verify against the schema constraints.
    private Map<String, Partition> getPartitionsByNames(final Long tableId,
        final List<String> partitionNames) {
        final List<Partition> partitions = partitionDao.getPartitions(tableId, partitionNames, null, null, null, null);
        return partitions.stream().collect(Collectors.toMap(Partition::getName, partition -> partition));
    }
    @Override
    public PartitionInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(), name.getTableName());
        final Table table = getTable(tableName);
        final List<Partition> partitions = partitionDao
            .getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
        if (partitions.isEmpty()) {
            throw new PartitionNotFoundException(tableName, name.getPartitionName());
        }
        log.debug("Get partition for table {}", tableName);
        return infoConverter.toPartitionInfo(tableName, table, partitions.get(0));
    }
    @Override
    public void deletePartitions(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName tableName,
        @Nonnull final List<String> partitionNames,
        final TableInfo tableInfo) {
        log.debug("Start: Delete partitions {} for table {}", partitionNames, tableName);
        partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(), partitionNames);
        log.debug("End: Delete partitions {} for table {}", partitionNames, tableName);
    }
    @Override
    public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        boolean result = false;
        // A partition can only exist when its table does; a missing table yields
        // false rather than TableNotFoundException.
        final Table table = tableDao.getBySourceDatabaseTableName(catalogName, name.getDatabaseName(),
            name.getTableName());
        if (table != null) {
            result = !partitionDao.getPartitions(table.getId(),
                Lists.newArrayList(name.getPartitionName()), null, null, null, null).isEmpty();
        }
        return result;
    }
    @Override
    public int getPartitionCount(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName table,
        final TableInfo tableInfo
    ) {
        return partitionDao.count(catalogName, table.getDatabaseName(), table.getTableName()).intValue();
    }
    @Override
    public List<PartitionInfo> list(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable) {
        log.debug("Get partitions for table {} with name prefix {}", name, prefix);
        // Prefix filtering happens in memory, after pagination was applied by the
        // underlying query.
        List<PartitionInfo> result = _getPartitions(name, null, null, sort, pageable, true);
        if (prefix != null) {
            result = result.stream().filter(p -> p.getName().getPartitionName().startsWith(prefix.getPartitionName()))
                .collect(Collectors.toList());
        }
        return result;
    }
    @Override
    public Map<String, List<QualifiedName>> getPartitionNames(@Nonnull final ConnectorRequestContext context,
        @Nonnull final List<String> uris,
        final boolean prefixSearch) {
        // Groups the qualified partition names by the uri they point at.
        return partitionDao.getByUris(uris, prefixSearch).stream().collect(Collectors.groupingBy(Partition::getUri,
            Collectors.mapping(p -> QualifiedName.ofPartition(
                catalogName, p.getTable().getDatabase().getName(), p.getTable().getName(), p.getName()),
                Collectors.toList())));
    }
    @Override
    public List<String> getPartitionKeys(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName tableName,
        @Nonnull final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo) {
        log.debug("Get partition keys for table {}", tableName);
        return _getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
            partitionsRequest.getSort(), partitionsRequest.getPageable(), true).stream()
            .map(p -> p.getName().getPartitionName()).collect(Collectors.toList());
    }
    @Override
    public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable) {
        log.debug("Get partition names for table {} with prefix {}", name, prefix);
        Stream<QualifiedName> result = _getPartitions(name, null, null, sort, pageable, true)
            .stream().map(BaseInfo::getName);
        if (prefix != null) {
            result = result
                .filter(partitionName -> partitionName.getPartitionName().startsWith(prefix.getPartitionName()));
        }
        return result.collect(Collectors.toList());
    }
    @Override
    public List<String> getPartitionUris(@Nonnull final ConnectorRequestContext context,
        @Nonnull final QualifiedName tableName,
        @Nonnull final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo) {
        log.debug("Get partition uris for table {}", tableName);
        // Partitions without storage info (or without a uri) are skipped.
        return _getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
            partitionsRequest.getSort(), partitionsRequest.getPageable(), true).stream()
            .filter(p -> p.getSerde() != null && p.getSerde().getUri() != null)
            .map(p -> p.getSerde().getUri()).collect(Collectors.toList());
    }
    // Entry point for all partition listings: splits an oversized partition-name
    // list into batches so the generated SQL IN clause stays within limits, then
    // delegates to _getConnectorPartitions for each batch.
    @SuppressWarnings("checkstyle:methodname")
    private List<PartitionInfo> _getPartitions(final QualifiedName tableName,
        final String filterExpression,
        final List<String> partitionIds,
        final Sort sort,
        final Pageable pageable,
        final boolean includePartitionDetails) {
        //
        // Limiting the in clause to 5000 part names because the sql query with the IN clause for part_name(767 bytes)
        // will hit the max sql query length(max_allowed_packet for our RDS) if we use more than 5400 or so
        //
        final List<PartitionInfo> partitions = com.google.common.collect.Lists.newArrayList();
        if (partitionIds != null && partitionIds.size() > 5000) {
            final List<List<String>> subFilterPartitionNamesList = com.google.common.collect.Lists
                .partition(partitionIds, 5000);
            subFilterPartitionNamesList.forEach(
                subPartitionIds -> partitions.addAll(_getConnectorPartitions(tableName, filterExpression,
                    subPartitionIds, sort, pageable, includePartitionDetails)));
        } else {
            partitions.addAll(_getConnectorPartitions(tableName, filterExpression, partitionIds, sort, pageable,
                includePartitionDetails));
        }
        return partitions;
    }
    // Fetches partitions from the DAO and applies the filter expression in memory.
    // When a filter expression is present, pagination cannot be pushed down to the
    // query (rows may be filtered out afterwards), so it is applied here instead.
    @SuppressWarnings("checkstyle:methodname")
    private List<PartitionInfo> _getConnectorPartitions(final QualifiedName tableName,
        final String filterExpression,
        final List<String> partitionIds,
        final Sort sort,
        final Pageable pageable,
        final boolean includePartitionDetails) {
        // batch exists
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        // Support for dateCreated
        final boolean hasDateCreated =
            !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        String dateCreatedSqlCriteria = null;
        if (hasDateCreated) {
            dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
        }
        // Table
        final Table table = getTable(tableName);
        final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
        final List<Partition> partitions = partitionDao
            .getPartitions(table.getId(), partitionIds, singlePartitionExprs, dateCreatedSqlCriteria, sort,
                Strings.isNullOrEmpty(filterExpression) ? pageable : null);
        final FilterPartition filter = new FilterPartition();
        List<PartitionInfo> result = partitions.stream().filter(partition -> {
            Map<String, String> values = null;
            if (hasDateCreated) {
                // The evaluator expects the creation time as epoch seconds.
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, (partition.getCreatedDate().getTime() / 1000) + "");
            }
            return Strings.isNullOrEmpty(filterExpression)
                || filter
                .evaluatePartitionExpression(filterExpression, partition.getName(), partition.getUri(), isBatched,
                    values);
        }).map(partition -> infoConverter.toPartitionInfo(tableName, table, partition)).collect(Collectors.toList());
        //
        // In-memory pagination for the filtered case; an offset beyond the result
        // yields an empty list.
        if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
            int limit = pageable.getOffset() + pageable.getLimit();
            if (result.size() < limit) {
                limit = result.size();
            }
            if (pageable.getOffset() > limit) {
                result = Lists.newArrayList();
            } else {
                result = result.subList(pageable.getOffset(), limit);
            }
        }
        return result;
    }
    // Translates the dateCreated clauses of the filter expression into a SQL
    // criteria string, rewriting the pseudo-column into to_seconds(p.date_created).
    private String getDateCreatedSqlCriteria(final String filterExpression) {
        final StringBuilder result = new StringBuilder();
        Collection<String> values = com.google.common.collect.Lists.newArrayList();
        if (!Strings.isNullOrEmpty(filterExpression)) {
            try {
                values = (Collection<String>) new PartitionParser(new StringReader(filterExpression)).filter()
                    .jjtAccept(new PartitionParamParserEval(), null);
            } catch (Throwable ignored) {
                // Best-effort: an unparseable filter simply contributes no
                // dateCreated criteria; filtering still happens in memory above.
            }
        }
        for (String value : values) {
            if (result.length() != 0) {
                result.append(" and ");
            }
            result.append(value.replace("dateCreated", "to_seconds(p.date_created)"));
        }
        return result.toString();
    }
    // Extracts the single-partition key expressions (key=value) from the filter,
    // dropping the batchid/dateCreated pseudo-columns which are handled separately.
    private Collection<String> getSinglePartitionExprs(final String filterExpression) {
        Collection<String> result = com.google.common.collect.Lists.newArrayList();
        if (!Strings.isNullOrEmpty(filterExpression)) {
            try {
                result = (Collection<String>) new PartitionParser(new StringReader(filterExpression)).filter()
                    .jjtAccept(new PartitionKeyParserEval(), null);
            } catch (Throwable ignored) {
                // Best-effort: fall back to no pushed-down key expressions.
            }
        }
        if (result != null) {
            result = result.stream().filter(s -> !(s.startsWith("batchid=") || s.startsWith("dateCreated="))).collect(
                Collectors.toList());
        }
        return result;
    }
}
| 9,636 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/S3ConnectorPlugin.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.type.TypeRegistry;
import com.netflix.metacat.connector.pig.converters.PigTypeConverter;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
 * S3 plugin. Registers the "s3" connector type with Metacat and supplies the
 * factory plus the type/info converters used to talk to an S3-backed catalog.
 */
public class S3ConnectorPlugin implements ConnectorPlugin {
    /**
     * Type of the connector.
     */
    public static final String CONNECTOR_TYPE = "s3";
    // Pig's type system is used for converting S3 column types (see getTypeConverter()).
    private static final PigTypeConverter PIG_TYPE_CONVERTER = new PigTypeConverter();
    // Shared converter between Metacat resource models and S3 entities.
    private static final ConnectorInfoConverter INFO_CONVERTER_S3 =
        new S3ConnectorInfoConverter(PIG_TYPE_CONVERTER, true, TypeRegistry.getTypeRegistry());
    /**
     * {@inheritDoc}
     */
    @Override
    public String getType() {
        return CONNECTOR_TYPE;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
        return new S3ConnectorFactory(connectorContext.getCatalogName(), connectorContext.getCatalogShardName(),
            connectorContext.getConfiguration(), (S3ConnectorInfoConverter) getInfoConverter());
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTypeConverter getTypeConverter() {
        return PIG_TYPE_CONVERTER;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorInfoConverter getInfoConverter() {
        return INFO_CONVERTER_S3;
    }
}
| 9,637 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 connector classes.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3;
| 9,638 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/TableDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.model.Table;
import java.util.List;
import java.util.Map;
/**
 * Table DAO. Lookups of table entities by qualified name and by data uri, in
 * addition to the generic CRUD operations inherited from {@code BaseDao}.
 */
public interface TableDao extends BaseDao<Table> {
    /**
     * Get a table by its fully qualified (source, database, table) name.
     * @param sourceName source name
     * @param databaseName database name
     * @param tableName table name
     * @return the matching table, or null when it does not exist
     */
    Table getBySourceDatabaseTableName(String sourceName, String databaseName, String tableName);
    /**
     * Get the list of tables matching any of the given names within a database.
     * @param sourceName source name
     * @param databaseName database name
     * @param tableNames table names
     * @return matching tables; empty when none match
     */
    List<Table> getBySourceDatabaseTableNames(String sourceName, String databaseName, List<String> tableNames);
    /**
     * Get the list of tables in a database whose names start with the given prefix.
     * @param sourceName source name
     * @param databaseName database name
     * @param tableNamePrefix table name prefix; null matches all tables
     * @param sort sort
     * @param pageable pageable
     * @return list of tables
     */
    List<Table> searchBySourceDatabaseTableName(String sourceName, String databaseName, String tableNamePrefix,
        Sort sort, Pageable pageable);
    /**
     * Gets the names of the tables for the given uris.
     * @param sourceName source name
     * @param uris list of uri paths
     * @param prefixSearch if true, will do a prefix search
     * @return Map of uri to list of table names
     */
    Map<String, List<QualifiedName>> getByUris(String sourceName, List<String> uris, boolean prefixSearch);
}
| 9,639 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/PartitionDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.model.Partition;
import java.util.List;
/**
 * Partition DAO. Lookup, count and bulk-delete operations for table partitions.
 */
public interface PartitionDao extends BaseDao<Partition> {
    /**
     * Get the list of partitions for the given table, optionally filtered by name,
     * partition key expressions and creation-date criteria.
     * @param tableId table id
     * @param partitionIds partition names; null or empty means no name filter
     * @param partitionParts partition key expressions that the partition names must contain
     * @param dateCreatedSqlCriteria SQL criteria on the creation date; may be null or empty
     * @param sort sort
     * @param pageable pageable
     * @return list of partitions
     */
    List<Partition> getPartitions(Long tableId, List<String> partitionIds, Iterable<String> partitionParts,
        String dateCreatedSqlCriteria, Sort sort, Pageable pageable);
    /**
     * Deletes the partitions for the given table and list of partition ids.
     * @param sourceName catalog/source name
     * @param databaseName schema/database name
     * @param tableName table name
     * @param partitionIds list of partition ids
     */
    void deleteByNames(String sourceName, String databaseName, String tableName, List<String> partitionIds);
    /**
     * Returns the number of partitions for the given table.
     * @param sourceName catalog/source name
     * @param databaseName schema/database name
     * @param tableName table name
     * @return number of partitions
     */
    Long count(String sourceName, String databaseName, String tableName);
    /**
     * Returns the list of partitions with the given uri.
     * @param uris uri paths
     * @param prefixSearch true, if the given uri is partial (matched as a prefix)
     * @return list of partitions
     */
    List<Partition> getByUris(List<String> uris, boolean prefixSearch);
}
| 9,640 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/BaseDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import java.util.List;
/**
 * The base dao. Defines the generic CRUD operations shared by all model entities.
 * @param <T> model entity type.
 */
public interface BaseDao<T> {
    /**
     * Save the entity to the data store.
     * @param entity the entity to save.
     * @return entity itself after being saved
     */
    T save(T entity);
    /**
     * Save the entity and refresh the entity from
     * the database if required.
     * NOTE(review): the JPA implementation flushes the persistence context when this
     * flag is set rather than refreshing the entity from the store - confirm intent.
     *
     * @param entity the entity to be saved and refreshed.
     *
     * @param isRefreshRequired {@code true} to perform a refresh from the store.
     * @return entity itself
     */
    T save(T entity, boolean isRefreshRequired);
    /**
     * Saves all given entities.
     *
     * @param entities list of entities to save
     * @return the saved entities
     * @throws IllegalArgumentException in case the given entity is {@literal null}.
     */
    List<T> save(Iterable<T> entities);
    /**
     * Delete the entity by using the id. A non-existent id is silently ignored.
     * @param id the id of the entity.
     */
    void deleteById(Long id);
    /**
     * Delete the entities for the given ids.
     * @param ids list of ids.
     */
    void deleteById(Iterable<Long> ids);
    /**
     * Delete the given entity.
     * @param entity entity to delete
     */
    void delete(T entity);
    /**
     * Delete the given entities.
     * @param entities list of entities to delete
     */
    void delete(Iterable<T> entities);
    /**
     * Deletes all entities managed by the repository.
     */
    void deleteAll();
    /**
     * Returns whether an entity with the given id exists.
     * @param id must not be {@literal null}.
     * @return true if an entity with the given id exists, {@literal false} otherwise
     * @throws IllegalArgumentException if {@code id} is {@literal null}
     */
    boolean isExists(Long id);
    /**
     * Returns an entity for the given id.
     * @param id id of the entity
     * @return the entity, or null when no entity with the given id exists
     */
    T get(Long id);
    /**
     * Returns an entity for the given name.
     * @param name name of the entity
     * @return the entity, or null when no entity with the given name exists
     */
    T getByName(String name);
    /**
     * Returns a list of entities for the given names.
     * @param names names of the entities
     * @return Returns a list of entities for the given names
     */
    List<T> getByNames(List<String> names);
    /**
     * Returns the list of entities for the given ids.
     * @param ids list of ids
     * @return Returns the list of entities for the given ids
     */
    List<T> get(Iterable<Long> ids);
    /**
     * Returns all the instances.
     * @return Returns all the instances
     */
    List<T> getAll();
    /**
     * Returns the number of entities available.
     *
     * @return the number of entities
     */
    long count();
}
| 9,641 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/SourceDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.connector.s3.model.Source;
/**
 * Source DAO. Lookup of catalog source entities, optionally served from a cache.
 */
public interface SourceDao extends BaseDao<Source> {
    /**
     * Get the source with the given name.
     * @param name name
     * @param fromCache if true, it will be fetched from cache.
     * @return source
     */
    Source getByName(String name, boolean fromCache);
}
| 9,642 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/DatabaseDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.model.Database;
import java.util.List;
/**
 * Database DAO. Lookup operations for database entities within a source.
 */
public interface DatabaseDao extends BaseDao<Database> {
    /**
     * Get database for the given source and database name.
     * @param sourceName source name
     * @param databaseName database name
     * @return the matching database, or null when it does not exist
     */
    Database getBySourceDatabaseName(String sourceName, String databaseName);
    /**
     * Get list of databases for the given source name and database names.
     * @param sourceName source name
     * @param databaseNames list of database names
     * @return list of databases
     */
    List<Database> getBySourceDatabaseNames(String sourceName, List<String> databaseNames);
    /**
     * Get list of databases for the given source name and database name prefix.
     * @param sourceName source name
     * @param databaseNamePrefix database name prefix
     * @param sort sort
     * @param pageable pageable
     * @return list of databases
     */
    List<Database> searchBySourceDatabaseName(String sourceName, String databaseNamePrefix,
        Sort sort, Pageable pageable);
}
| 9,643 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/FieldDao.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao;
import com.netflix.metacat.connector.s3.model.Field;
/**
 * Field DAO. Marker interface exposing only the generic {@code BaseDao} CRUD
 * operations for {@code Field} entities.
 */
public interface FieldDao extends BaseDao<Field> {
}
| 9,644 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 dao interfaces.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3.dao;
| 9,645 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/PartitionDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.dao.PartitionDao;
import com.netflix.metacat.connector.s3.model.Partition;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.Query;
import javax.persistence.TypedQuery;
import java.util.List;
/**
* Partition DAO impl.
*/
public class PartitionDaoImpl extends IdEntityDaoImpl<Partition> implements PartitionDao {
private static final String SQL_GET_PARTITIONS = "select * from partition_table as p where p.table_id=:tableId";
/**
* Constructor.
* @param em entity manager
*/
@Inject
public PartitionDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Partition> getEntityClass() {
return Partition.class;
}
/**
* Gets the partitions.
* @param tableId table id
* @param partitionIds partition names
* @param partitionParts parts
* @param dateCreatedSqlCriteria criteria
* @param sort sort
* @param pageable pageable
* @return list of partitions
*/
public List<Partition> getPartitions(final Long tableId, final List<String> partitionIds,
final Iterable<String> partitionParts, final String dateCreatedSqlCriteria,
final Sort sort, final Pageable pageable) {
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(SQL_GET_PARTITIONS);
if (partitionIds != null && !partitionIds.isEmpty()) {
queryBuilder.append(" and p.name in ('")
.append(Joiner.on("','").skipNulls().join(partitionIds))
.append("')");
}
if (partitionParts != null) {
for (String singlePartitionExpr : partitionParts) {
queryBuilder.append(" and p.name like '%").append(singlePartitionExpr).append("%'");
}
}
if (!Strings.isNullOrEmpty(dateCreatedSqlCriteria)) {
queryBuilder.append(" and ").append(dateCreatedSqlCriteria);
}
if (sort != null && sort.hasSort()) {
queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
}
if (pageable != null && pageable.isPageable()) {
queryBuilder.append(" limit ").append(pageable.getOffset()).append(',').append(pageable.getLimit());
}
// entityManager
final EntityManager entityManager = em.get();
final Query pQuery = entityManager.createNativeQuery(queryBuilder.toString(), Partition.class);
pQuery.setParameter("tableId", tableId);
return pQuery.getResultList();
}
@Override
public void deleteByNames(final String sourceName, final String databaseName, final String tableName,
final List<String> partitionNames) {
final Query query = em.get().createNamedQuery(Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseName", databaseName);
query.setParameter("tableName", tableName);
query.setParameter("partitionNames", partitionNames);
query.executeUpdate();
}
@Override
public Long count(final String sourceName, final String databaseName, final String tableName) {
final TypedQuery<Long> query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_COUNT_FOR_TABLE,
Long.class);
query.setParameter("sourceName", sourceName);
query.setParameter("databaseName", databaseName);
query.setParameter("tableName", tableName);
return query.getSingleResult();
}
@Override
public List<Partition> getByUris(final List<String> uris, final boolean prefixSearch) {
TypedQuery<Partition> query = null;
if (prefixSearch) {
final StringBuilder builder = new StringBuilder("select p from Partition p where 1=2");
uris.forEach(uri -> builder.append(" or uri like '").append(uri).append("%'"));
query = em.get().createQuery(builder.toString(), Partition.class);
} else {
query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_BY_URI, Partition.class);
query.setParameter("uris", uris);
}
return query.getResultList();
}
}
| 9,646 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/TableDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.dao.TableDao;
import com.netflix.metacat.connector.s3.model.Table;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.Query;
import javax.persistence.TypedQuery;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Table DAO impl. Looks up table entities by qualified name and by data uri.
 */
public class TableDaoImpl extends IdEntityDaoImpl<Table> implements TableDao {
    private static final String SQL_SEARCH_TABLES =
        "select t from Table t where t.database.source.name=:sourceName and t.database.name=:databaseName"
            + " and (1=:isTableNameNull or t.name like :tableName)";
    private static final String SQL_GET_TABLE_NAMES_BY_URIS =
        "select d.name dname,t.name,uri from source s join database_object d on s.id=d.source_id join table_object t"
            + " on d.id=t.database_id join location l on t.id=l.table_id where s.name=:sourceName";

    /**
     * Constructor.
     * @param em entity manager
     */
    @Inject
    public TableDaoImpl(final Provider<EntityManager> em) {
        super(em);
    }

    @Override
    protected Class<Table> getEntityClass() {
        return Table.class;
    }

    /**
     * Escapes single quotes by doubling them so a value can be embedded inside a
     * SQL string literal without terminating it (prevents SQL injection via uris).
     * @param value raw value
     * @return escaped value
     */
    private static String escapeSqlLiteral(final String value) {
        return value.replace("'", "''");
    }

    @Override
    public Table getBySourceDatabaseTableName(final String sourceName, final String databaseName,
        final String tableName) {
        Table result = null;
        final List<Table> tables = getBySourceDatabaseTableNames(sourceName, databaseName,
            Lists.newArrayList(tableName));
        if (!tables.isEmpty()) {
            result = tables.get(0);
        }
        return result;
    }

    @Override
    public List<Table> getBySourceDatabaseTableNames(final String sourceName, final String databaseName,
        final List<String> tableNames) {
        final TypedQuery<Table> query = em.get().createNamedQuery(Table.NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES,
            Table.class);
        query.setParameter("sourceName", sourceName);
        query.setParameter("databaseName", databaseName);
        query.setParameter("tableNames", tableNames);
        return query.getResultList();
    }

    @Override
    public List<Table> searchBySourceDatabaseTableName(final String sourceName, final String databaseName,
        final String tableNamePrefix, final Sort sort, final Pageable pageable) {
        final StringBuilder queryBuilder = new StringBuilder(SQL_SEARCH_TABLES);
        if (sort != null && sort.hasSort()) {
            // NOTE(review): sortBy is inlined into the query string; callers should
            // validate it against known attribute names.
            queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
        }
        final TypedQuery<Table> query = em.get().createQuery(queryBuilder.toString(), Table.class);
        query.setParameter("sourceName", sourceName);
        query.setParameter("databaseName", databaseName);
        // When the prefix is null the like clause is short-circuited by isTableNameNull,
        // so the "null%" pattern bound below is never evaluated.
        query.setParameter("isTableNameNull", tableNamePrefix == null ? 1 : 0);
        query.setParameter("tableName", tableNamePrefix + "%");
        if (pageable != null && pageable.isPageable()) {
            query.setFirstResult(pageable.getOffset());
            query.setMaxResults(pageable.getLimit());
        }
        return query.getResultList();
    }

    @Override
    public Map<String, List<QualifiedName>> getByUris(final String sourceName, final List<String> uris,
        final boolean prefixSearch) {
        final StringBuilder builder = new StringBuilder(SQL_GET_TABLE_NAMES_BY_URIS);
        if (prefixSearch) {
            // Prefix match: OR of LIKE clauses. Uris are escaped before being inlined;
            // the previous version concatenated them raw, which was injectable.
            builder.append(" and ( 1=0");
            uris.forEach(uri -> builder.append(" or uri like '").append(escapeSqlLiteral(uri)).append("%'"));
            builder.append(")");
        } else {
            builder.append(" and uri in (:uris)");
        }
        final Query query = em.get().createNativeQuery(builder.toString());
        query.setParameter("sourceName", sourceName);
        if (!prefixSearch) {
            query.setParameter("uris", uris);
        }
        final List<Object[]> result = query.getResultList();
        // Row layout: [0]=database name, [1]=table name, [2]=uri; group table names by uri.
        return result.stream().collect(Collectors.groupingBy(o -> (String) o[2], Collectors
            .mapping(o -> QualifiedName.ofTable(sourceName, (String) o[0], (String) o[1]), Collectors.toList())));
    }
}
| 9,647 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/IdEntityDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.connector.s3.dao.BaseDao;
import com.netflix.metacat.connector.s3.model.IdEntity;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.criteria.CriteriaBuilder;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Root;
import java.util.List;
/**
 * Base DAO implementation for entities keyed by a generated numeric id.
 * @param <T> model entity type.
 */
public abstract class IdEntityDaoImpl<T extends IdEntity> extends BaseDaoImpl<T> implements
    BaseDao<T> {
    /**
     * Constructor.
     * @param em entity manager
     */
    protected IdEntityDaoImpl(final Provider<EntityManager> em) {
        super(em);
    }

    /**
     * Loads every entity whose id appears in the given ids, using a single
     * criteria query rather than one lookup per id.
     */
    @Override
    public List<T> get(final Iterable<Long> ids) {
        final EntityManager entityManager = em.get();
        final CriteriaBuilder builder = entityManager.getCriteriaBuilder();
        final CriteriaQuery<T> query = builder.createQuery(getEntityClass());
        final Root<T> entityRoot = query.from(getEntityClass());
        query.where(entityRoot.get("id").in(Lists.newArrayList(ids)));
        return entityManager.createQuery(query).getResultList();
    }

    /**
     * An entity is considered new until the database has assigned it an id.
     */
    @Override
    protected boolean isNew(final T entity) {
        return null == entity.getId();
    }
}
| 9,648 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/BaseDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.metacat.connector.s3.dao.BaseDao;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.NoResultException;
import javax.persistence.NonUniqueResultException;
import javax.persistence.TypedQuery;
import java.util.List;
/**
* Base DAO implementation.
* @param <T> model entity type.
*/
public abstract class BaseDaoImpl<T> implements BaseDao<T> {
private static final String SQL_GET_BY_NAME = "select a from %s a where name=:name";
private static final String SQL_GET_BY_NAMES = "select a from %s a where name in (:names)";
protected Provider<EntityManager> em;
protected BaseDaoImpl(final Provider<EntityManager> em) {
this.em = em;
}
protected abstract Class<T> getEntityClass();
@Override
public T save(final T entity) {
return save(entity, false);
}
protected abstract boolean isNew(T entity);
@Override
public T save(final T entity, final boolean flush) {
T result = null;
final EntityManager entityManager = em.get();
if (isNew(entity)) {
entityManager.persist(entity);
result = entity;
} else {
result = entityManager.merge(entity);
}
if (flush) {
entityManager.flush();
}
return result;
}
@Override
public List<T> save(final Iterable<T> entities) {
final List<T> result = Lists.newArrayList();
if (entities != null) {
for (T entity : entities) {
result.add(save(entity));
}
}
return result;
}
@Override
public void deleteById(final Long id) {
Preconditions.checkArgument(id != null, "Id cannot be null.");
final T entity = get(id);
if (entity != null) {
delete(entity);
}
}
@Override
public void deleteById(final Iterable<Long> ids) {
Preconditions.checkArgument(ids != null, "Ids cannot be null.");
for (Long id : ids) {
deleteById(id);
}
}
@Override
public void delete(final T entity) {
Preconditions.checkArgument(entity != null, "Entity cannot be null.");
final EntityManager entityManager = em.get();
entityManager.remove(entity);
}
@Override
public void delete(final Iterable<T> entities) {
Preconditions.checkArgument(entities != null, "Entities cannot be null.");
for (T entity : entities) {
delete(entity);
}
}
@Override
public void deleteAll() {
em.get().createQuery("delete from " + getEntityClass().getName()).executeUpdate();
}
@Override
public boolean isExists(final Long id) {
return get(id) != null;
}
@Override
public T get(final Long id) {
Preconditions.checkArgument(id != null, "Id cannot be null.");
return em.get().find(getEntityClass(), id);
}
@Override
@SuppressFBWarnings
public T getByName(final String name) {
T result = null;
Preconditions.checkArgument(name != null, "Name cannot be null.");
final TypedQuery<T> query = em.get()
.createQuery(String.format(SQL_GET_BY_NAME, getEntityClass().getName()), getEntityClass());
query.setParameter("name", name);
try {
result = query.getSingleResult();
} catch (Exception ignored) { }
return result;
}
@Override
public List<T> getByNames(final List<String> names) {
    if (names == null || names.isEmpty()) {
        // Avoid issuing a query with an empty IN clause.
        return Lists.newArrayList();
    }
    final TypedQuery<T> query = em.get()
        .createQuery(String.format(SQL_GET_BY_NAMES, getEntityClass().getName()), getEntityClass());
    query.setParameter("names", names);
    return query.getResultList();
}
@Override
public List<T> get(final Iterable<Long> ids) {
    final List<T> found = Lists.newArrayList();
    // Preserves input order; ids with no matching row contribute null entries.
    for (final Long id : ids) {
        found.add(get(id));
    }
    return found;
}
/**
 * Returns all persisted entities of this DAO's type.
 *
 * @return every entity; may be empty
 */
@Override
public List<T> getAll() {
    // Use a TypedQuery so the result is type-checked instead of relying on an
    // unchecked cast from the raw Query API (removes @SuppressWarnings).
    return em.get()
        .createQuery("select a from " + getEntityClass().getName() + " a", getEntityClass())
        .getResultList();
}
/**
 * Returns the total number of persisted entities of this DAO's type.
 *
 * @return the entity count
 */
@Override
public long count() {
    // JPQL count(...) yields a Long; a typed query avoids the raw Object cast.
    return em.get()
        .createQuery("select count(a) from " + getEntityClass().getName() + " a", Long.class)
        .getSingleResult();
}
}
| 9,649 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/DatabaseDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.connector.s3.dao.DatabaseDao;
import com.netflix.metacat.connector.s3.model.Database;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import javax.persistence.TypedQuery;
import java.util.List;
/**
 * JPA implementation of {@link DatabaseDao} for the S3 connector.
 *
 * <p>Provides lookups of {@link Database} entities by source (catalog) name and
 * prefix-based search with optional sorting and pagination.
 */
public class DatabaseDaoImpl extends IdEntityDaoImpl<Database> implements DatabaseDao {
    // The (1=:isNameNull or ...) trick turns the LIKE predicate into a no-op
    // when no name prefix is supplied (isNameNull is bound to 1 in that case).
    private static final String SQL_SEARCH_DATABASES =
        "select d from Database d where d.source.name=:sourceName and (1=:isNameNull or d.name like :databaseName)";

    /**
     * Constructor.
     * @param em entity manager
     */
    @Inject
    public DatabaseDaoImpl(final Provider<EntityManager> em) {
        super(em);
    }

    /** {@inheritDoc} */
    @Override
    protected Class<Database> getEntityClass() {
        return Database.class;
    }

    /**
     * Returns the database with the given name under the given source, or null
     * if it does not exist.
     *
     * @param sourceName source (catalog) name
     * @param databaseName database name
     * @return the matching database or null
     */
    @Override
    public Database getBySourceDatabaseName(final String sourceName, final String databaseName) {
        Database result = null;
        final List<Database> databases = getBySourceDatabaseNames(sourceName, Lists.newArrayList(databaseName));
        if (!databases.isEmpty()) {
            result = databases.get(0);
        }
        return result;
    }

    /**
     * Returns the databases with the given names under the given source, using
     * the {@link Database#NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES} named query.
     *
     * @param sourceName source (catalog) name
     * @param databaseNames database names to look up
     * @return matching databases; may be empty
     */
    @Override
    public List<Database> getBySourceDatabaseNames(final String sourceName, final List<String> databaseNames) {
        final TypedQuery<Database> query = em.get().createNamedQuery(Database.NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES,
            Database.class);
        query.setParameter("sourceName", sourceName);
        query.setParameter("databaseNames", databaseNames);
        return query.getResultList();
    }

    /**
     * Searches databases under a source by an optional name prefix.
     *
     * @param sourceName source (catalog) name
     * @param databaseNamePrefix database name prefix; may be null to match all
     * @param sort optional sort. NOTE(review): sort.getSortBy() is concatenated
     *             directly into the JPQL string — confirm it can never carry
     *             user-controlled input (injection risk otherwise)
     * @param pageable optional pagination (offset/limit)
     * @return matching databases
     */
    @Override
    public List<Database> searchBySourceDatabaseName(final String sourceName, final String databaseNamePrefix,
        final Sort sort, final Pageable pageable) {
        final StringBuilder queryBuilder = new StringBuilder(SQL_SEARCH_DATABASES);
        if (sort != null && sort.hasSort()) {
            queryBuilder.append(" order by ").append(sort.getSortBy()).append(" ").append(sort.getOrder().name());
        }
        final EntityManager entityManager = em.get();
        final TypedQuery<Database> pQuery = entityManager.createQuery(queryBuilder.toString(), Database.class);
        pQuery.setParameter("sourceName", sourceName);
        pQuery.setParameter("isNameNull", databaseNamePrefix == null ? 1 : 0);
        // When the prefix is null this binds the literal string "null%", but the
        // predicate is neutralized by isNameNull=1 so the value is never used.
        pQuery.setParameter("databaseName", databaseNamePrefix + "%");
        if (pageable != null && pageable.isPageable()) {
            pQuery.setFirstResult(pageable.getOffset());
            pQuery.setMaxResults(pageable.getLimit());
        }
        return pQuery.getResultList();
    }
}
| 9,650 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/FieldDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.netflix.metacat.connector.s3.dao.FieldDao;
import com.netflix.metacat.connector.s3.model.Field;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
/**
 * JPA implementation of {@link FieldDao}; all generic CRUD behavior is
 * inherited from {@code IdEntityDaoImpl}.
 */
public class FieldDaoImpl extends IdEntityDaoImpl<Field> implements FieldDao {
    /**
     * Constructor.
     * @param em entity manager
     */
    @Inject
    public FieldDaoImpl(final Provider<EntityManager> em) {
        super(em);
    }

    /** {@inheritDoc} */
    @Override
    protected Class<Field> getEntityClass() {
        return Field.class;
    }
}
| 9,651 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/SourceDaoImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.dao.impl;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.netflix.metacat.common.server.connectors.exception.CatalogNotFoundException;
import com.netflix.metacat.connector.s3.dao.SourceDao;
import com.netflix.metacat.connector.s3.model.Source;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.persistence.EntityManager;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* Source DAO impl.
*/
public class SourceDaoImpl extends IdEntityDaoImpl<Source> implements SourceDao {
private LoadingCache<String, Source> sourceCache = CacheBuilder.newBuilder().expireAfterWrite(120, TimeUnit.MINUTES)
.build(
new CacheLoader<String, Source>() {
@Override
public Source load(final String name) throws Exception {
return loadSource(name);
}
});
/**
* Constructor.
* @param em entity manager
*/
@Inject
public SourceDaoImpl(final Provider<EntityManager> em) {
super(em);
}
@Override
protected Class<Source> getEntityClass() {
return Source.class;
}
private Source loadSource(final String name) {
return super.getByName(name);
}
@Override
public Source getByName(final String name) {
Source result = null;
try {
result = sourceCache.get(name);
} catch (ExecutionException ignored) {
//
}
if (result == null) {
throw new CatalogNotFoundException(name);
}
return result;
}
@Override
public Source getByName(final String name, final boolean fromCache) {
if (!fromCache) {
sourceCache.invalidate(name);
}
return getByName(name);
}
}
| 9,652 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/dao/impl/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 dao implementations.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3.dao.impl;
| 9,653 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Info.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CollectionTable;
import javax.persistence.Column;
import javax.persistence.ElementCollection;
import javax.persistence.Entity;
import javax.persistence.JoinColumn;
import javax.persistence.MapKeyColumn;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
import java.util.Map;
/**
 * Serialization/format metadata ("info") for a table {@link Location}.
 *
 * <p>Captures the storage-descriptor style pieces: input/output formats,
 * serialization library, owner and free-form parameters. Exactly one info row
 * per location (unique constraint on location_id).
 */
@Entity
@javax.persistence.Table(name = "info",
    uniqueConstraints = @UniqueConstraint(name = "info_u1", columnNames = "location_id"))
public class Info extends IdEntity {
    private String inputFormat;
    private String outputFormat;
    private String serializationLib;
    private String owner;
    private Map<String, String> parameters;
    private Location location;

    /** @return the input format name */
    @Column(name = "input_format")
    public String getInputFormat() {
        return inputFormat;
    }

    public void setInputFormat(final String inputFormat) {
        this.inputFormat = inputFormat;
    }

    /** @return the output format name */
    @Column(name = "output_format")
    public String getOutputFormat() {
        return outputFormat;
    }

    public void setOutputFormat(final String outputFormat) {
        this.outputFormat = outputFormat;
    }

    /** @return the serialization library name */
    @Column(name = "serialization_lib")
    public String getSerializationLib() {
        return serializationLib;
    }

    public void setSerializationLib(final String serializationLib) {
        this.serializationLib = serializationLib;
    }

    /** @return the owner of the table's data */
    @Column(name = "owner")
    public String getOwner() {
        return owner;
    }

    public void setOwner(final String owner) {
        this.owner = owner;
    }

    /** @return free-form parameters, stored in the info_parameters side table */
    @ElementCollection
    @MapKeyColumn(name = "parameters_idx")
    @Column(name = "parameters_elt")
    @CollectionTable(name = "info_parameters")
    public Map<String, String> getParameters() {
        return parameters;
    }

    public void setParameters(final Map<String, String> parameters) {
        this.parameters = parameters;
    }

    /** @return the owning location */
    @OneToOne
    @JoinColumn(name = "location_id", nullable = false)
    public Location getLocation() {
        return location;
    }

    public void setLocation(final Location location) {
        this.location = location;
    }
}
| 9,654 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Schema.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.OneToMany;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
import java.util.List;
/**
 * Schema of a table {@link Location}: the list of {@link Field}s. Exactly one
 * schema row per location (unique constraint on location_id).
 */
@Entity
@javax.persistence.Table(name = "schema_object",
    uniqueConstraints = @UniqueConstraint(name = "schema_object_u1", columnNames = "location_id"))
public class Schema extends IdEntity {
    private Location location;
    private List<Field> fields;

    /** @return the owning location */
    @OneToOne
    @JoinColumn(name = "location_id", nullable = false)
    public Location getLocation() {
        return location;
    }

    public void setLocation(final Location location) {
        this.location = location;
    }

    /** @return the schema's fields; lazily loaded and cascaded with the schema */
    @OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "schema")
    public List<Field> getFields() {
        return fields;
    }

    public void setFields(final List<Field> fields) {
        this.fields = fields;
    }
}
| 9,655 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Table.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
/**
 * Table entity: belongs to a {@link Database} and owns a {@link Location}.
 * Table names are unique within a database; the name column is indexed.
 */
@Entity
@javax.persistence.Table(name = "table_object",
    indexes = { @Index(name = "table_object_i1", columnList = "name") },
    uniqueConstraints = @UniqueConstraint(name = "table_object_u1", columnNames = { "database_id", "name" }))
@NamedQueries({
    @NamedQuery(
        name = Table.NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES,
        query = "select t from Table t where t.database.source.name=:sourceName and t.database.name=:databaseName"
            + " and t.name in (:tableNames)"
    )
})
public class Table extends BaseTable {
    /** Query name to get table for the given source, database and table names. */
    public static final String NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES = "getBySourceDatabaseTableNames";
    private Database database;
    private Location location;

    /** @return the owning database; lazily fetched */
    @ManyToOne(fetch = FetchType.LAZY, optional = false)
    @JoinColumn(name = "database_id", nullable = false)
    public Database getDatabase() {
        return database;
    }

    public void setDatabase(final Database database) {
        this.database = database;
    }

    /** @return the table's storage location; eagerly fetched, cascaded with the table */
    @OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "table")
    public Location getLocation() {
        return location;
    }

    public void setLocation(final Location location) {
        this.location = location;
    }
}
| 9,656 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Database.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToMany;
import javax.persistence.UniqueConstraint;
import java.util.List;
/**
 * Database entity: belongs to a {@link Source} (catalog) and owns a list of
 * {@link Table}s. Database names are unique within a source.
 */
@Entity
@javax.persistence.Table(name = "database_object",
    indexes = @Index(name = "database_object_i1", columnList = "name"),
    uniqueConstraints = @UniqueConstraint(name = "database_object_u1", columnNames = { "source_id", "name" }))
@NamedQueries({
    @NamedQuery(
        name = Database.NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES,
        query = "select d from Database d where d.source.name=:sourceName and d.name in (:databaseNames)"
    )
})
public class Database extends IdEntity {
    /** Named query name: databases by source name and a list of database names. */
    public static final String NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES = "getBySourceDatabaseNames";
    private String name;
    private List<Table> tables;
    private Source source;

    /** @return the database name */
    @Column(name = "name", nullable = false)
    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    /** @return tables in this database; lazily loaded, cascaded with the database */
    @OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "database")
    public List<Table> getTables() {
        return tables;
    }

    public void setTables(final List<Table> tables) {
        this.tables = tables;
    }

    /** @return the owning source (catalog); lazily fetched */
    @ManyToOne(fetch = FetchType.LAZY, optional = false)
    @JoinColumn(name = "source_id", nullable = false)
    public Source getSource() {
        return source;
    }

    public void setSource(final Source source) {
        this.source = source;
    }
}
| 9,657 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/BaseEntity.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import org.joda.time.Instant;
import javax.persistence.Column;
import javax.persistence.MappedSuperclass;
import javax.persistence.PrePersist;
import javax.persistence.PreUpdate;
import java.sql.Timestamp;
import java.util.Date;
/**
 * {@code BaseEntity} is the base class for all entities, carrying the audit
 * timestamps (creation date and last-updated date) that are stamped
 * automatically via JPA lifecycle callbacks.
 */
@MappedSuperclass
public class BaseEntity {
    /** The date of creation. */
    protected Date createdDate;
    /** The last updated date. */
    protected Date lastUpdatedDate;

    /**
     * Get the date and time of the entity creation.
     *
     * @return The date and time of the creation
     */
    @Column(name = "date_created", insertable = true, updatable = false, nullable = false)
    public Date getCreatedDate() {
        return createdDate;
    }

    /**
     * Set the date and time of the creation.
     *
     * @param createdDate The date and time of the creation
     */
    public void setCreatedDate(final Date createdDate) {
        this.createdDate = createdDate;
    }

    /**
     * Set the date and time of the creation from a SQL timestamp.
     *
     * @param createdDate The date and time of the creation
     */
    public void setCreatedDate(final Timestamp createdDate) {
        this.createdDate = createdDate;
    }

    /**
     * Get the date and time of the last update.
     *
     * @return The date and time of the last update
     */
    @Column(name = "last_updated", insertable = true, updatable = true, nullable = false)
    public Date getLastUpdatedDate() {
        return lastUpdatedDate;
    }

    /**
     * Set the date and time of the last update.
     *
     * @param lastUpdatedDate The date and time of the last update
     */
    public void setLastUpdatedDate(final Date lastUpdatedDate) {
        this.lastUpdatedDate = lastUpdatedDate;
    }

    /**
     * Set the date and time of the last update from a SQL timestamp.
     *
     * @param lastUpdatedDate The date and time of the last update
     */
    public void setLastUpdatedDate(final Timestamp lastUpdatedDate) {
        this.lastUpdatedDate = lastUpdatedDate;
    }

    /**
     * JPA pre-persist hook: stamps any audit date that has not been set
     * explicitly before the row is inserted.
     */
    @PrePersist
    public void onInsert() {
        if (createdDate == null) {
            setCreatedDate(Instant.now().toDate());
        }
        if (lastUpdatedDate == null) {
            setLastUpdatedDate(Instant.now().toDate());
        }
    }

    /**
     * JPA pre-update hook: refreshes the last-updated date.
     *
     * <p>Fix: this previously stamped the date only when it was null, but an
     * entity loaded from the database always has a non-null value, so updates
     * never refreshed the audit column. It is now always set on update.
     */
    @PreUpdate
    void onUpdate() {
        setLastUpdatedDate(Instant.now().toDate());
    }

    /**
     * Validate the entity for valid values. The base implementation performs
     * no checks; subclasses may override.
     */
    public void validate() {
    }
}
| 9,658 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/IdEntity.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.Column;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.MappedSuperclass;
import javax.persistence.Version;
/**
 * Mapped superclass for entities keyed by an auto-generated numeric id, with
 * optimistic locking via a version column.
 */
@MappedSuperclass
public class IdEntity extends BaseEntity {
    private Long id;
    private Long version;

    /** @return the auto-generated (identity column) primary key */
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id", unique = true, nullable = false)
    public Long getId() {
        return id;
    }

    public void setId(final Long id) {
        this.id = id;
    }

    /** @return the optimistic-lock version, managed by the JPA provider */
    @Version
    @Column(name = "version", nullable = false)
    public Long getVersion() {
        return version;
    }

    public void setVersion(final Long version) {
        this.version = version;
    }
}
| 9,659 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/BaseTable.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.FetchType;
import javax.persistence.MappedSuperclass;
import javax.persistence.OneToMany;
import java.util.List;
/**
 * Mapped superclass for table-like entities: a named entity that owns a list
 * of {@link Partition}s.
 */
@MappedSuperclass
public abstract class BaseTable extends IdEntity {
    private String name;
    private List<Partition> partitions;

    /** @return the table name */
    @Column(name = "name", nullable = false)
    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    /** @return the table's partitions; lazily loaded, cascaded with the table */
    @OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "table")
    public List<Partition> getPartitions() {
        return partitions;
    }

    public void setPartitions(final List<Partition> partitions) {
        this.partitions = partitions;
    }
}
| 9,660 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Location.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
/**
 * Storage location of a {@link Table}: the URI plus the associated
 * {@link Schema} and serialization {@link Info}. Exactly one location row per
 * table (unique constraint on table_id).
 */
@Entity
@javax.persistence.Table(name = "location",
    uniqueConstraints = @UniqueConstraint(name = "location_u1", columnNames = "table_id"))
public class Location extends IdEntity {
    private String uri;
    private Table table;
    private Schema schema;
    private Info info;

    /** @return the storage URI (presumably an S3 path — TODO confirm); may be null */
    @Column(name = "uri", nullable = true)
    public String getUri() {
        return uri;
    }

    public void setUri(final String uri) {
        this.uri = uri;
    }

    /** @return the owning table */
    @OneToOne
    @JoinColumn(name = "table_id", nullable = false)
    public Table getTable() {
        return table;
    }

    public void setTable(final Table table) {
        this.table = table;
    }

    /** @return the location's schema; eagerly fetched, cascaded with the location */
    @OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "location")
    public Schema getSchema() {
        return schema;
    }

    public void setSchema(final Schema schema) {
        this.schema = schema;
    }

    /** @return the location's serialization info; eagerly fetched, cascaded with the location */
    @OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "location")
    public Info getInfo() {
        return info;
    }

    public void setInfo(final Info info) {
        this.info = info;
    }
}
| 9,661 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Partition.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.UniqueConstraint;
/**
 * Partition entity: a named partition of a {@link Table} with its storage URI.
 * Partition names are unique within a table; name and uri are indexed.
 */
@Entity
@javax.persistence.Table(name = "partition_table",
    indexes = { @Index(name = "partition_table_i1", columnList = "name"),
        @Index(name = "partition_table_i2", columnList = "uri") },
    uniqueConstraints = @UniqueConstraint(name = "partition_table_u1", columnNames = { "table_id", "name" }))
@NamedQueries({
    @NamedQuery(
        name = Partition.NAME_QUERY_GET_FOR_TABLE,
        query = "select p from Partition p where p.table.name=:tableName and p.table.database.name=:databaseName"
            + " and p.table.database.source.name=:sourceName"
    ),
    @NamedQuery(
        name = Partition.NAME_QUERY_GET_COUNT_FOR_TABLE,
        query = "select count(p) from Partition p where p.table.name=:tableName"
            + " and p.table.database.name=:databaseName and p.table.database.source.name=:sourceName"
    ),
    @NamedQuery(
        name = Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES,
        query = "delete from Partition p where p.table.id = (select t.id from Table t where t.name=:tableName"
            + " and t.database.name=:databaseName and t.database.source.name=:sourceName)"
            + " and p.name in (:partitionNames)"
    ),
    @NamedQuery(
        name = Partition.NAME_QUERY_GET_BY_URI,
        query = "select p from Partition p where p.uri in :uris"
    ),
    @NamedQuery(
        name = Partition.NAME_QUERY_GET_BY_URI_PREFIX,
        query = "select p from Partition p where p.uri like :uri"
    )
})
public class Partition extends IdEntity {
    /** Query name to get partitions for a given table. */
    public static final String NAME_QUERY_GET_FOR_TABLE = "getForTable";
    /** Query name to get partition count for a given table. */
    public static final String NAME_QUERY_GET_COUNT_FOR_TABLE = "getCountForTable";
    /** Query name to delete partitions of a table by partition names. */
    public static final String NAME_QUERY_DELETE_BY_PARTITION_NAMES = "deleteByPartitionNames";
    /** Query name to get partitions for a given list of uris. */
    public static final String NAME_QUERY_GET_BY_URI = "getByUri";
    /** Query name to get partitions for a given uri prefix. */
    public static final String NAME_QUERY_GET_BY_URI_PREFIX = "getByUriPrefix";
    private String name;
    private String uri;
    private Table table;

    /** @return the partition name */
    @Column(name = "name", nullable = false)
    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    /** @return the partition's storage URI */
    @Column(name = "uri", nullable = false)
    public String getUri() {
        return uri;
    }

    public void setUri(final String uri) {
        this.uri = uri;
    }

    /** @return the owning table; lazily fetched */
    @ManyToOne(fetch = FetchType.LAZY, optional = false)
    @JoinColumn(name = "table_id", nullable = false)
    public Table getTable() {
        return table;
    }

    public void setTable(final Table table) {
        this.table = table;
    }
}
| 9,662 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Source.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.OneToMany;
import javax.persistence.UniqueConstraint;
import java.util.List;
/**
 * Source entity: a catalog/data source with a unique name, a type, an optional
 * thrift URI, a disabled flag and the {@link Database}s it contains.
 */
@Entity
@javax.persistence.Table(name = "source",
    uniqueConstraints = @UniqueConstraint(name = "source_u1", columnNames = "name"))
public class Source extends IdEntity {
    private String name;
    private String type;
    private String thriftUri;
    private boolean disabled;
    private List<Database> databases;

    /** @return the source (catalog) name; unique across all sources */
    @Column(name = "name", nullable = false)
    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    /** @return the source type */
    @Column(name = "type", nullable = false)
    public String getType() {
        return type;
    }

    public void setType(final String type) {
        this.type = type;
    }

    /** @return the thrift URI, if any — presumably a metastore endpoint; TODO confirm */
    @Column(name = "thrift_uri")
    public String getThriftUri() {
        return thriftUri;
    }

    public void setThriftUri(final String thriftUri) {
        this.thriftUri = thriftUri;
    }

    /** @return true when the source is disabled */
    @Column(name = "disabled", nullable = false)
    public boolean isDisabled() {
        return disabled;
    }

    public void setDisabled(final boolean disabled) {
        this.disabled = disabled;
    }

    /** @return databases in this source; lazily loaded, cascaded with the source */
    @OneToMany(cascade = CascadeType.ALL, fetch = FetchType.LAZY, mappedBy = "source")
    public List<Database> getDatabases() {
        return databases;
    }

    public void setDatabases(final List<Database> databases) {
        this.databases = databases;
    }
}
| 9,663 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/Field.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.UniqueConstraint;
/**
 * JPA entity representing a single field (column) of a table {@link Schema}.
 *
 * <p>A field is uniquely identified within its schema by the combination of
 * (schema_id, name, pos), enforced by constraint {@code field_u1}. Annotations
 * are placed on the getters, so JPA uses property access for this entity.
 */
@Entity
@javax.persistence.Table(name = "field",
    uniqueConstraints = @UniqueConstraint(name = "field_u1", columnNames = { "schema_id", "name", "pos" }))
public class Field extends IdEntity {
    // Zero-or-one-based ordinal position of the field within its schema
    // (exact base not visible here -- determined by the writer of these rows).
    private int pos;
    // Field (column) name.
    private String name;
    // Canonical type signature; column allows up to 4000 chars for deeply nested types.
    private String type;
    // Optional type string as expressed by the originating source system.
    private String sourceType;
    // Optional free-form description of the field.
    private String comment;
    // True when the field is part of the table's partition key.
    private boolean partitionKey;
    // Owning schema; lazily fetched and required (non-optional association).
    private Schema schema;

    /** @return the ordinal position of this field within its schema */
    @Column(name = "pos", nullable = false)
    public int getPos() {
        return pos;
    }

    /** @param pos the ordinal position of this field within its schema */
    public void setPos(final int pos) {
        this.pos = pos;
    }

    /** @return the field name */
    @Column(name = "name", nullable = false)
    public String getName() {
        return name;
    }

    /** @param name the field name */
    public void setName(final String name) {
        this.name = name;
    }

    /** @return the canonical type signature of the field */
    @Column(name = "type", nullable = false, length = 4000)
    public String getType() {
        return type;
    }

    /** @param type the canonical type signature of the field */
    public void setType(final String type) {
        this.type = type;
    }

    /** @return the source-system type string, or null if not recorded */
    @Column(name = "source_type", nullable = true)
    public String getSourceType() {
        return sourceType;
    }

    /** @param sourceType the source-system type string */
    public void setSourceType(final String sourceType) {
        this.sourceType = sourceType;
    }

    /** @return the free-form field description, or null if none */
    @Column(name = "comment", nullable = true)
    public String getComment() {
        return comment;
    }

    /** @param comment the free-form field description */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /** @return true if this field is part of the partition key */
    @Column(name = "partition_key", nullable = false)
    public boolean isPartitionKey() {
        return partitionKey;
    }

    /** @param partitionKey whether this field is part of the partition key */
    public void setPartitionKey(final boolean partitionKey) {
        this.partitionKey = partitionKey;
    }

    /** @return the schema this field belongs to (required association) */
    @ManyToOne(fetch = FetchType.LAZY, optional = false)
    @JoinColumn(name = "schema_id", nullable = false)
    public Schema getSchema() {
        return schema;
    }

    /** @param schema the schema this field belongs to */
    public void setSchema(final Schema schema) {
        this.schema = schema;
    }
}
| 9,664 |
0 |
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3
|
Create_ds/metacat/metacat-connector-s3/src/main/java/com/netflix/metacat/connector/s3/model/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes s3 connector model classes.
*
* @author amajumdar
*/
package com.netflix.metacat.connector.s3.model;
| 9,665 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/NameDateDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common;
import com.netflix.metacat.common.dto.BaseDto;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.util.Date;
/**
 * DTO containing the qualified name of an entity and its audit dates.
 *
 * <p>Accessors, {@code equals}/{@code hashCode} and {@code toString} are generated
 * by Lombok's {@code @Data}; equality deliberately ignores {@code BaseDto} state
 * ({@code callSuper = false}).
 *
 * @author amajumdar
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class NameDateDto extends BaseDto {
    private static final long serialVersionUID = -5713826608609231492L;
    // When the entity was created.
    @ApiModelProperty(value = "The date the entity was created")
    private Date createDate;
    // When the entity was last modified.
    @ApiModelProperty(value = "The date the entity was last updated")
    private Date lastUpdated;
    // Fully qualified name identifying the entity.
    @ApiModelProperty(value = "The entity's name", required = true)
    private QualifiedName name;
}
| 9,666 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/QualifiedName.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonValue;
import com.fasterxml.jackson.databind.JsonNode;
import com.netflix.metacat.common.dto.PartitionDto;
import lombok.Getter;
import lombok.NonNull;
import javax.annotation.Nullable;
import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * A fully qualified name that references a source of data at one of five
 * granularities: catalog, database, table, partition, or materialized view.
 *
 * <p>Instances are immutable. Missing name parts are stored internally as empty
 * strings (never null), and the standardizing constructor trims and (usually)
 * lower-cases the parts. The canonical string form is slash-separated, e.g.
 * {@code catalog/database/table/partitionName}.
 *
 * @author amajumdar
 */
@Getter
public final class QualifiedName implements Serializable {
    private static final long serialVersionUID = -7916364073519921672L;
    // Catalogs whose names start with this prefix keep their original case (see constructor).
    private static final String CATALOG_CDE_NAME_PREFIX = "cde_";
    private final String catalogName;
    private final String databaseName;
    private final String partitionName;
    private final String tableName;
    private final String viewName;
    // Granularity of this name; derived from the most specific non-empty part.
    private final Type type;

    /**
     * Standardizing constructor: validates and normalizes all parts, then derives
     * the {@link Type}. Throws IllegalStateException when a child part is present
     * without its parent (e.g. a partition without a table).
     */
    private QualifiedName(
        @NonNull final String catalogName,
        @Nullable final String databaseName,
        @Nullable final String tableName,
        @Nullable final String partitionName,
        @Nullable final String viewName
    ) {
        this.catalogName = standardizeRequired("catalogName", catalogName);
        // TODO: Temporary hack to support a certain catalog that has mixed case naming.
        final boolean forceLowerCase = !catalogName.startsWith(CATALOG_CDE_NAME_PREFIX);
        this.databaseName = standardizeOptional(databaseName, forceLowerCase);
        this.tableName = standardizeOptional(tableName, forceLowerCase);
        // Partition names are never lower-cased; their case is preserved as given.
        this.partitionName = standardizeOptional(partitionName, false);
        this.viewName = standardizeOptional(viewName, forceLowerCase);
        if (this.databaseName.isEmpty() && (!this.tableName.isEmpty() || !this.partitionName.isEmpty())) {
            throw new IllegalStateException("databaseName is not present but tableName or partitionName are present");
        } else if (this.tableName.isEmpty() && !this.partitionName.isEmpty()) {
            throw new IllegalStateException("tableName is not present but partitionName is present");
        }
        // Most specific non-empty part determines the type.
        if (!this.viewName.isEmpty()) {
            type = Type.MVIEW;
        } else if (!this.partitionName.isEmpty()) {
            type = Type.PARTITION;
        } else if (!this.tableName.isEmpty()) {
            type = Type.TABLE;
        } else if (!this.databaseName.isEmpty()) {
            type = Type.DATABASE;
        } else {
            type = Type.CATALOG;
        }
    }

    /**
     * Copying constructor: assigns all parts verbatim with no standardization or
     * validation. Used by {@link #cloneWithUpperCase()}.
     */
    private QualifiedName(
        @NonNull final String catalogName,
        @Nullable final String databaseName,
        @Nullable final String tableName,
        @Nullable final String partitionName,
        @Nullable final String viewName,
        @NonNull final Type type) {
        this.catalogName = catalogName;
        this.databaseName = databaseName;
        this.partitionName = partitionName;
        this.tableName = tableName;
        this.viewName = viewName;
        this.type = type;
    }

    /**
     * Creates the name from the json. Falls back to parsing the "qualifiedName"
     * string property when "catalogName" is absent or non-textual.
     *
     * @param node json node
     * @return qualified name
     */
    @JsonCreator
    public static QualifiedName fromJson(@NonNull final JsonNode node) {
        final JsonNode catalogNameNode = node.path("catalogName");
        if (catalogNameNode.isMissingNode() || catalogNameNode.isNull() || !catalogNameNode.isTextual()) {
            // If catalogName is not present try to load from the qualifiedName node instead
            final JsonNode nameNode = node.path("qualifiedName");
            if (!nameNode.isNull() && nameNode.isTextual()) {
                return fromString(nameNode.asText(), false);
            } else {
                // if neither are available throw an exception
                throw new IllegalStateException("Node '" + node + "' is missing catalogName");
            }
        }
        final String catalogName = catalogNameNode.asText();
        final JsonNode databaseNameNode = node.path("databaseName");
        String databaseName = null;
        if (databaseNameNode != null) {
            databaseName = databaseNameNode.asText();
        }
        final JsonNode tableNameNode = node.path("tableName");
        String tableName = null;
        if (tableNameNode != null) {
            tableName = tableNameNode.asText();
        }
        final JsonNode partitionNameNode = node.path("partitionName");
        String partitionName = null;
        if (partitionNameNode != null) {
            partitionName = partitionNameNode.asText();
        }
        final JsonNode viewNameNode = node.path("viewName");
        String viewName = null;
        if (viewNameNode != null) {
            viewName = viewNameNode.asText();
        }
        return new QualifiedName(catalogName, databaseName, tableName, partitionName, viewName);
    }

    /**
     * Creates the qualified name from text.
     *
     * @param s name
     * @return qualified name
     */
    public static QualifiedName fromString(@NonNull final String s) {
        return fromString(s, false);
    }

    /**
     * Creates the qualified name from a slash-separated string. A four-part name
     * is treated as a view when {@code isView} is true or the last part contains
     * no '=' (partition names are expected to carry key=value pairs).
     *
     * @param s name
     * @param isView true if it represents a view
     * @return qualified name
     */
    public static QualifiedName fromString(@NonNull final String s, final boolean isView) {
        //noinspection ConstantConditions
        final String name = s.trim();
        if (name.isEmpty()) {
            throw new IllegalArgumentException("passed in an empty definition name");
        }
        // Limit of 4: everything after the third slash belongs to the last part.
        final String[] parts = name.split("/", 4);
        switch (parts.length) {
            case 1:
                return ofCatalog(parts[0]);
            case 2:
                return ofDatabase(parts[0], parts[1]);
            case 3:
                return ofTable(parts[0], parts[1], parts[2]);
            case 4:
                if (isView || !parts[3].contains("=")) {
                    return ofView(parts[0], parts[1], parts[2], parts[3]);
                } else {
                    return ofPartition(parts[0], parts[1], parts[2], parts[3]);
                }
            default:
                throw new IllegalArgumentException("Unable to convert '" + s + "' into a qualifiedDefinition");
        }
    }

    /**
     * Returns a copy of this qualified name with the database/table/view names in
     * upper case. The partition name and catalog name are left unchanged, and no
     * re-validation happens (the copying constructor is used).
     *
     * @return QualifiedName
     */
    public QualifiedName cloneWithUpperCase() {
        return new QualifiedName(catalogName,
            databaseName == null ? null : databaseName.toUpperCase(),
            tableName == null ? null : tableName.toUpperCase(),
            partitionName,
            viewName == null ? null : viewName.toUpperCase(),
            type);
    }

    /**
     * Creates the qualified name representing a catalog.
     *
     * @param catalogName catalog name
     * @return qualified name
     */
    public static QualifiedName ofCatalog(@NonNull final String catalogName) {
        return new QualifiedName(catalogName, null, null, null, null);
    }

    /**
     * Creates the qualified name representing a database.
     *
     * @param catalogName catalog name
     * @param databaseName database name
     * @return qualified name
     */
    public static QualifiedName ofDatabase(
        @NonNull final String catalogName,
        @NonNull final String databaseName
    ) {
        return new QualifiedName(catalogName, databaseName, null, null, null);
    }

    /**
     * Creates the qualified name representing a view.
     *
     * @param catalogName catalog name
     * @param databaseName database name
     * @param tableName table name
     * @param viewName view name
     * @return qualified name
     */
    public static QualifiedName ofView(
        @NonNull final String catalogName,
        @NonNull final String databaseName,
        @NonNull final String tableName,
        @NonNull final String viewName
    ) {
        return new QualifiedName(catalogName, databaseName, tableName, null, viewName);
    }

    /**
     * Creates the qualified name representing a partition.
     *
     * @param tableName table name
     * @param partitionDto partition
     * @return qualified name
     */
    public static QualifiedName ofPartition(
        @NonNull final QualifiedName tableName,
        @NonNull final PartitionDto partitionDto
    ) {
        return ofPartition(
            tableName.catalogName,
            tableName.databaseName,
            tableName.tableName,
            partitionDto.getName().getPartitionName()
        );
    }

    /**
     * Creates the qualified name representing a partition.
     *
     * @param catalogName catalog name
     * @param databaseName database name
     * @param tableName table name
     * @param partitionName partition name
     * @return qualified name
     */
    public static QualifiedName ofPartition(
        @NonNull final String catalogName,
        @NonNull final String databaseName,
        @NonNull final String tableName,
        @NonNull final String partitionName
    ) {
        return new QualifiedName(catalogName, databaseName, tableName, partitionName, null);
    }

    /**
     * Creates the qualified name representing a table.
     *
     * @param catalogName catalog name
     * @param databaseName database name
     * @param tableName table name
     * @return qualified name
     */
    public static QualifiedName ofTable(
        @NonNull final String catalogName,
        @NonNull final String databaseName,
        @NonNull final String tableName
    ) {
        return new QualifiedName(catalogName, databaseName, tableName, null, null);
    }

    /**
     * Creates a wild card string format of the qualified name. Each missing part
     * is replaced with '%' and a trailing '%' is always appended. Returns null
     * when all parts are null.
     *
     * @param sourceName catalog/source name
     * @param databaseName database name
     * @param tableName table name
     * @return wild card string format of the qualified name
     */
    public static String toWildCardString(
        @Nullable final String sourceName,
        @Nullable final String databaseName,
        @Nullable final String tableName
    ) {
        if (sourceName == null && databaseName == null && tableName == null) {
            return null;
        }
        final StringBuilder builder = new StringBuilder();
        if (sourceName != null) {
            builder.append(sourceName);
        } else {
            builder.append('%');
        }
        if (databaseName != null) {
            builder.append('/').append(databaseName);
        } else {
            builder.append("/%");
        }
        if (tableName != null) {
            builder.append('/').append(tableName);
        } else {
            builder.append("/%");
        }
        builder.append('%');
        return builder.toString();
    }

    /**
     * Change the qualified name query parameter to wildcard query string to allow source/database/table
     * like queries. It uses '%' to represent the other field if not provided. e.g.
     * query database like string is '%/database/%'
     * query catalog and database like string is 'catalog/database/%'
     *
     * @param sourceName source name
     * @param databaseName database name
     * @param tableName table name
     * @return query string, or null when all parts are null
     */
    public static String qualifiedNameToWildCardQueryString(
        @Nullable final String sourceName,
        @Nullable final String databaseName,
        @Nullable final String tableName
    ) {
        if (sourceName == null && databaseName == null && tableName == null) {
            return null;
        }
        final StringBuilder builder = new StringBuilder();
        if (!isNullOrEmpty(sourceName)) {
            builder.append(sourceName);
        } else {
            builder.append('%');
        }
        if (isNullOrEmpty(databaseName) && isNullOrEmpty(tableName)) {
            return builder.append('%').toString(); //query source level
        }
        if (!isNullOrEmpty(databaseName)) {
            builder.append('/').append(databaseName);
        } else {
            builder.append("/%");
        }
        if (isNullOrEmpty(tableName)) {
            return builder.append('%').toString(); //database level query
        } else {
            builder.append('/').append(tableName);
        }
        builder.append('%');
        return builder.toString();
    }

    /**
     * Get the catalog name.
     * NOTE(review): duplicates the Lombok-generated getter from {@code @Getter}.
     *
     * @return The catalog name
     */
    public String getCatalogName() {
        return this.catalogName;
    }

    /**
     * Returns the database name.
     *
     * @return database name
     * @throws IllegalStateException when this name has no database part
     */
    public String getDatabaseName() {
        // TODO: This is a bad exception to throw. If its truly an illegal state exception we shouldn't allow that
        // object to be built.
        if (this.databaseName.isEmpty()) {
            throw new IllegalStateException("This is not a database definition");
        }
        return this.databaseName;
    }

    /**
     * Returns the partition name.
     *
     * @return partition name
     * @throws IllegalStateException when this name has no partition part
     */
    public String getPartitionName() {
        // TODO: This is a bad exception to throw. If its truly an illegal state exception we shouldn't allow that
        // object to be built.
        if (partitionName.isEmpty()) {
            throw new IllegalStateException("This is not a partition definition");
        }
        return partitionName;
    }

    /**
     * Returns the table name.
     *
     * @return table name
     * @throws IllegalStateException when this name has no table part
     */
    public String getTableName() {
        // TODO: This is a bad exception to throw. If its truly an illegal state exception we shouldn't allow that
        // object to be built.
        if (tableName.isEmpty()) {
            throw new IllegalStateException("This is not a table definition");
        }
        return tableName;
    }

    /**
     * Returns whether other is prefix of this. A null argument is treated as a
     * universal prefix (always true). The comparison is on the canonical string
     * forms of both names.
     *
     * @param other the other QualifiedName
     * @return whether other is prefix
     */
    public boolean startsWith(final QualifiedName other) {
        return other == null ? true : toString().startsWith(other.toString());
    }

    /** @return true always — every qualified name has a catalog part */
    public boolean isCatalogDefinition() {
        return !catalogName.isEmpty();
    }

    /** @return true when this name has a database part */
    public boolean isDatabaseDefinition() {
        return !databaseName.isEmpty();
    }

    /** @return true when this name has a partition part */
    public boolean isPartitionDefinition() {
        return !partitionName.isEmpty();
    }

    /** @return true when this name has a table part */
    public boolean isTableDefinition() {
        return !tableName.isEmpty();
    }

    // Normalizes an optional part: null becomes "", otherwise trimmed and
    // optionally lower-cased. Result is never null.
    private static String standardizeOptional(@Nullable final String value, final boolean forceLowerCase) {
        if (value == null) {
            return "";
        } else {
            String returnValue = value.trim();
            if (forceLowerCase) {
                returnValue = returnValue.toLowerCase();
            }
            return returnValue;
        }
    }

    // Normalizes a required part: rejects null/empty, then trims and lower-cases.
    private static String standardizeRequired(final String name, @Nullable final String value) {
        if (value == null) {
            throw new IllegalStateException(name + " cannot be null");
        }
        final String returnValue = value.trim();
        if (returnValue.isEmpty()) {
            throw new IllegalStateException(name + " cannot be an empty string");
        }
        return returnValue.toLowerCase();
    }

    /**
     * Returns the qualified name in the JSON format. Empty parts are omitted;
     * the full canonical string is always present under "qualifiedName".
     *
     * @return qualified name
     */
    @JsonValue
    public Map<String, String> toJson() {
        final Map<String, String> map = new HashMap<>(5);
        map.put("qualifiedName", toString());
        map.put("catalogName", catalogName);
        if (!databaseName.isEmpty()) {
            map.put("databaseName", databaseName);
        }
        if (!tableName.isEmpty()) {
            map.put("tableName", tableName);
        }
        if (!partitionName.isEmpty()) {
            map.put("partitionName", partitionName);
        }
        if (!viewName.isEmpty()) {
            map.put("viewName", viewName);
        }
        return map;
    }

    /**
     * Returns the qualified name in parts. Same as {@link #toJson()} but without
     * the aggregate "qualifiedName" entry.
     *
     * @return parts of the qualified name as a Map
     */
    public Map<String, String> parts() {
        final Map<String, String> map = new HashMap<>(5);
        map.put("catalogName", catalogName);
        if (!databaseName.isEmpty()) {
            map.put("databaseName", databaseName);
        }
        if (!tableName.isEmpty()) {
            map.put("tableName", tableName);
        }
        if (!partitionName.isEmpty()) {
            map.put("partitionName", partitionName);
        }
        if (!viewName.isEmpty()) {
            map.put("viewName", viewName);
        }
        return map;
    }

    /** @return true when this name has a view part */
    public boolean isViewDefinition() {
        return !viewName.isEmpty();
    }

    // TODO: Replace custom equals and hashcode with generated. Tried but broke tests.
    /**
     * {@inheritDoc}
     *
     * <p>Equality is over the five name parts only; the derived {@code type}
     * field is intentionally excluded.
     */
    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof QualifiedName)) {
            return false;
        }
        final QualifiedName that = (QualifiedName) o;
        return Objects.equals(catalogName, that.catalogName)
            && Objects.equals(databaseName, that.databaseName)
            && Objects.equals(partitionName, that.partitionName)
            && Objects.equals(tableName, that.tableName)
            && Objects.equals(viewName, that.viewName);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        return Objects.hash(catalogName, databaseName, partitionName, tableName, viewName);
    }

    /**
     * {@inheritDoc}
     *
     * <p>Produces the canonical slash-separated form; empty parts are skipped.
     */
    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder(catalogName);
        if (!databaseName.isEmpty()) {
            sb.append('/');
            sb.append(databaseName);
        }
        if (!tableName.isEmpty()) {
            sb.append('/');
            sb.append(tableName);
        }
        if (!partitionName.isEmpty()) {
            sb.append('/');
            sb.append(partitionName);
        }
        if (!viewName.isEmpty()) {
            sb.append('/');
            sb.append(viewName);
        }
        return sb.toString();
    }

    /**
     * Checks if a CharSequence is empty ("") or null.
     */
    private static boolean isNullOrEmpty(@Nullable final CharSequence cs) {
        return cs == null || cs.length() == 0;
    }

    /**
     * Type of the connector resource. Each constant carries a regex describing
     * the string form of a name of that type.
     */
    public enum Type {
        /**
         * Catalog type.
         */
        CATALOG("^([^\\/]+)$"),
        /**
         * Database type.
         */
        DATABASE("^([^\\/]+)\\/([^\\/]+)$"),
        /**
         * Table type.
         */
        TABLE("^([^\\/]+)\\/([^\\/]+)\\/([^\\/]+)$"),
        /**
         * Partition type.
         */
        PARTITION("^(.*)$"),
        /**
         * MView type.
         */
        MVIEW("^([^\\/]+)\\/([^\\/]+)\\/([^\\/]+)\\/([^\\/]+)$");

        private final String regexValue;

        /**
         * Constructor.
         *
         * @param value category value.
         */
        Type(final String value) {
            this.regexValue = value;
        }

        /**
         * get Regex Value.
         * @return regex value
         */
        public String getRegexValue() {
            return regexValue;
        }

        /**
         * Type create from value (case-insensitive match on the constant name).
         *
         * @param value string value
         * @return Type object
         */
        public static Type fromValue(final String value) {
            for (Type type : values()) {
                if (type.name().equalsIgnoreCase(value)) {
                    return type;
                }
            }
            throw new IllegalArgumentException(
                "Unknown enum type " + value + ", Allowed values are " + Arrays.toString(values()));
        }
    }
}
| 9,667 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/MetacatRequestContext.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import javax.annotation.Nullable;
import java.io.Serializable;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
/**
 * The context of the request to metacat. Captures who made the request, through
 * which client/scheme, plus mutable per-request state (user name, request name,
 * a table-type cache and free-form additional context).
 *
 * @author amajumdar
 * @author tgianos
 * @author zhenl
 */
@Getter
public class MetacatRequestContext implements Serializable {
    /**
     * Request header representing the user name.
     */
    public static final String HEADER_KEY_USER_NAME = "X-Netflix.user.name";
    /**
     * Request header representing the client application name.
     */
    public static final String HEADER_KEY_CLIENT_APP_NAME = "X-Netflix.client.app.name";
    /**
     * Request header representing the job id.
     */
    public static final String HEADER_KEY_JOB_ID = "X-Netflix.job.id";
    /**
     * Request header representing the data type context.
     */
    public static final String HEADER_KEY_DATA_TYPE_CONTEXT = "X-Netflix.data.type.context";
    /**
     * Default if unknown.
     */
    public static final String UNKNOWN = "UNKNOWN";
    private static final long serialVersionUID = -1486145626431113817L;
    // Unique id of this request instance.
    private final String id = UUID.randomUUID().toString();
    // TODO: Move to Java 8 and use java.time.Instant
    private final long timestamp = new Date().getTime();
    // the following fields are immutable.
    private final String clientAppName;
    private final String clientId;
    private final String jobId;
    private final String dataTypeContext;
    private final String apiUri;
    private final String scheme;
    // The following fields are set during request processing and are mutable.
    // The general expectation is that these would be set zero or one times.
    @Setter
    private String userName;
    // Table name -> table type cache; no getter is generated (AccessLevel.NONE),
    // access goes through updateTableTypeMap/getTableType/clearTableTypeMap below.
    @Getter(AccessLevel.NONE)
    private Map<QualifiedName, String> tableTypeMap;
    // Free-form key/value context attached to the request.
    @Getter
    private final Map<String, String> additionalContext = new HashMap<>();
    @Setter
    private String requestName = UNKNOWN;

    /**
     * Constructor. Creates an anonymous context: all identifying fields are null
     * and apiUri/scheme default to {@link #UNKNOWN}.
     */
    public MetacatRequestContext() {
        this.userName = null;
        this.clientAppName = null;
        this.clientId = null;
        this.jobId = null;
        this.dataTypeContext = null;
        this.apiUri = UNKNOWN;
        this.scheme = UNKNOWN;
        this.tableTypeMap = new HashMap<>();
    }

    /**
     * Constructor.
     *
     * @param userName        user name
     * @param clientAppName   client application name
     * @param clientId        client id
     * @param jobId           job id
     * @param dataTypeContext data type context
     * @param apiUri          the uri of rest api
     * @param scheme          http, thrift, internal, etc.
     */
    protected MetacatRequestContext(
        @Nullable final String userName,
        @Nullable final String clientAppName,
        @Nullable final String clientId,
        @Nullable final String jobId,
        @Nullable final String dataTypeContext,
        final String apiUri,
        final String scheme
    ) {
        this.userName = userName;
        this.clientAppName = clientAppName;
        this.clientId = clientId;
        this.jobId = jobId;
        this.dataTypeContext = dataTypeContext;
        this.apiUri = apiUri;
        this.scheme = scheme;
        this.tableTypeMap = new HashMap<>();
    }

    /**
     * {@inheritDoc}
     *
     * <p>Note: the tableTypeMap is deliberately not included in the output.
     */
    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("MetacatRequestContext{");
        sb.append("id='").append(id).append('\'');
        sb.append(", timestamp=").append(timestamp);
        sb.append(", userName='").append(userName).append('\'');
        sb.append(", clientAppName='").append(clientAppName).append('\'');
        sb.append(", clientId='").append(clientId).append('\'');
        sb.append(", jobId='").append(jobId).append('\'');
        sb.append(", dataTypeContext='").append(dataTypeContext).append('\'');
        sb.append(", apiUri='").append(apiUri).append('\'');
        sb.append(", scheme='").append(scheme).append('\'');
        sb.append(", additionalContext='").append(additionalContext).append('\'');
        sb.append(", requestName='").append(requestName).append('\'');
        sb.append('}');
        return sb.toString();
    }

    /**
     * Store the tableType associated with table specified by qualifiedName param.
     * @param qualifiedName fully qualified name of table
     * @param tableType table type of table
     */
    public void updateTableTypeMap(@NonNull final QualifiedName qualifiedName, final String tableType) {
        this.tableTypeMap.put(qualifiedName, tableType);
    }

    /**
     * Clear all entries from the tableType map.
     */
    public void clearTableTypeMap() {
        this.tableTypeMap.clear();
    }

    /**
     * Gets tableType.
     * @param qualifiedName fully qualified name of the table
     * @return the tableType associated with table specified by qualifiedName param, or null when absent
     */
    public String getTableType(@NonNull final QualifiedName qualifiedName) {
        return this.tableTypeMap.get(qualifiedName);
    }

    /**
     * builder class for MetacatRequestContext.
     * @return the builder class for MetacatRequestContext
     */
    public static MetacatRequestContext.MetacatRequestContextBuilder builder() {
        return new MetacatRequestContext.MetacatRequestContextBuilder();
    }

    /**
     * MetacatRequestContext builder class. All identifying fields default to
     * null except apiUri/scheme, which default to {@link #UNKNOWN}.
     */
    public static class MetacatRequestContextBuilder {
        private String bUserName;
        private String bClientAppName;
        private String bClientId;
        private String bJobId;
        private String bDataTypeContext;
        private String bApiUri;
        private String bScheme;

        MetacatRequestContextBuilder() {
            this.bApiUri = UNKNOWN;
            this.bScheme = UNKNOWN;
        }

        /**
         * set userName.
         *
         * @param userName user name at client side
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder userName(@Nullable final String userName) {
            this.bUserName = userName;
            return this;
        }

        /**
         * set clientAppName.
         *
         * @param clientAppName application name of client
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder clientAppName(@Nullable final String clientAppName) {
            this.bClientAppName = clientAppName;
            return this;
        }

        /**
         * set clientId.
         *
         * @param clientId client identifier, such as host name
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder clientId(@Nullable final String clientId) {
            this.bClientId = clientId;
            return this;
        }

        /**
         * set jobId.
         *
         * @param jobId jobid from client side
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder jobId(@Nullable final String jobId) {
            this.bJobId = jobId;
            return this;
        }

        /**
         * set datatypeContext.
         *
         * @param dataTypeContext the data type context in rest api
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder dataTypeContext(
            @Nullable final String dataTypeContext) {
            this.bDataTypeContext = dataTypeContext;
            return this;
        }

        /**
         * set apiUri.
         *
         * @param apiUri the uri in rest api
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder apiUri(final String apiUri) {
            this.bApiUri = apiUri;
            return this;
        }

        /**
         * set scheme.
         *
         * @param scheme the scheme component in restapi such as http
         * @return the builder
         */
        public MetacatRequestContext.MetacatRequestContextBuilder scheme(final String scheme) {
            this.bScheme = scheme;
            return this;
        }

        /**
         * builder.
         *
         * @return MetacatRequestContext object
         */
        public MetacatRequestContext build() {
            return new MetacatRequestContext(this.bUserName,
                this.bClientAppName,
                this.bClientId,
                this.bJobId,
                this.bDataTypeContext,
                this.bApiUri,
                this.bScheme);
        }
    }
}
| 9,668 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Common package for Metacat.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,669 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * Data metadata DTO: pairs a data location ({@code uri}) with a free-form JSON
 * metadata blob. Accessors, equals/hashCode and toString are generated by
 * Lombok's {@code @Data}.
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class DataMetadataDto extends BaseDto implements HasDataMetadata {
    private static final long serialVersionUID = -874750260731085106L;
    // Location of the data this metadata describes.
    private String uri;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata")
    @JsonProperty
    private transient ObjectNode dataMetadata;

    /**
     * Returns uri.
     *
     * <p>NOTE(review): the original doc claimed an IllegalStateException when no
     * external data exists, but this implementation performs no such check and
     * simply returns the field — confirm against the {@code HasDataMetadata}
     * contract.
     *
     * @return The uri that points to the location of the external data.
     */
    @Nonnull
    @Override
    @JsonIgnore
    public String getDataUri() {
        return uri;
    }

    /**
     * Returns true if this particular instance points to external data.
     * This implementation always reports {@code false}.
     *
     * @return false, unconditionally
     */
    @Override
    public boolean isDataExternal() {
        return false;
    }

    /**
     * Sets the data external property. Intentionally a no-op: the value is
     * discarded (presumably kept so deserialization of a "dataExternal"
     * property does not fail — confirm with serializer configuration).
     *
     * @param ignored is data external
     */
    @SuppressWarnings("EmptyMethod")
    public void setDataExternal(final boolean ignored) {
    }

    // Custom Java serialization: the transient metadata blob is read/written
    // manually via the (de)serializeObjectNode helpers inherited from BaseDto.
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        dataMetadata = deserializeObjectNode(in);
    }

    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, dataMetadata);
    }
}
| 9,670 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/Sort.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
/**
 * Sort info. Holds the field to sort on and the direction to apply when
 * listing results.
 */
public class Sort {
    private String sortBy;
    private SortOrder order;

    /**
     * Default constructor.
     */
    public Sort() {
    }

    /**
     * Creates a sort specification.
     *
     * @param sortBy the field to sort on
     * @param order  the direction to sort in
     */
    public Sort(final String sortBy, final SortOrder order) {
        this.sortBy = sortBy;
        this.order = order;
    }

    public String getSortBy() {
        return this.sortBy;
    }

    public void setSortBy(final String sortBy) {
        this.sortBy = sortBy;
    }

    /**
     * Returns the sort direction, defaulting to ascending when none was set.
     *
     * @return the configured order, or {@code SortOrder.ASC} if unset
     */
    public SortOrder getOrder() {
        if (this.order == null) {
            return SortOrder.ASC;
        }
        return this.order;
    }

    public void setOrder(final SortOrder order) {
        this.order = order;
    }

    /**
     * True if sortBy is specified.
     *
     * @return true when a sort field has been set
     */
    public boolean hasSort() {
        return this.sortBy != null;
    }
}
| 9,671 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/FieldDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.databind.JsonNode;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
/**
 * Field DTO. Describes a single table field/column: its name, type, position,
 * and partition/sort/index key flags.
 */
@ApiModel(description = "Table field/column metadata")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@NoArgsConstructor
@Builder
@AllArgsConstructor
public class FieldDto extends BaseDto {
    private static final long serialVersionUID = 9096928516299407324L;
    @ApiModelProperty(value = "Comment of the field/column")
    private String comment;
    @ApiModelProperty(value = "Name of the field/column", required = true)
    private String name;
    // snake_case field names are kept (with checkstyle suppressed) to preserve the
    // existing JSON property names on the wire.
    @ApiModelProperty(value = "Is it a partition Key. If true, it is a partition key.")
    @SuppressWarnings("checkstyle:membername")
    private boolean partition_key;
    @ApiModelProperty(value = "Position of the field/column", required = true)
    private Integer pos;
    @ApiModelProperty(value = "Source type of the field/column")
    @SuppressWarnings("checkstyle:membername")
    private String source_type;
    @ApiModelProperty(value = "Type of the field/column", required = true)
    private String type;
    @ApiModelProperty(value = "Type of the field/column in JSON format",
        accessMode = ApiModelProperty.AccessMode.READ_ONLY)
    private JsonNode jsonType;
    @ApiModelProperty(value = "Can the field/column be null")
    private Boolean isNullable;
    @ApiModelProperty(value = "Size of the field/column")
    private Integer size;
    @ApiModelProperty(value = "Default value of the column")
    private String defaultValue;
    @ApiModelProperty(value = "Is the column a sorted key")
    private Boolean isSortKey;
    @ApiModelProperty(value = "Is the column an index key")
    private Boolean isIndexKey;

    // Manual accessors below keep the snake_case method names expected by existing callers.
    @SuppressWarnings("checkstyle:methodname")
    public String getSource_type() {
        return source_type;
    }

    @SuppressWarnings("checkstyle:methodname")
    public void setSource_type(final String sourceType) {
        this.source_type = sourceType;
    }

    @SuppressWarnings("checkstyle:methodname")
    public boolean isPartition_key() {
        return partition_key;
    }

    @SuppressWarnings("checkstyle:methodname")
    public void setPartition_key(final boolean partitionKey) {
        this.partition_key = partitionKey;
    }
}
| 9,672 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseCreateRequestDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Map;
/**
 * Database create request. Payload for creating a database, carrying optional
 * definition metadata, connector-specific properties, and a storage uri.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class DatabaseCreateRequestDto extends BaseDto {
    private static final long serialVersionUID = 6308417213106650174L;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata attached to the physical data")
    @JsonProperty
    private transient ObjectNode definitionMetadata;
    @ApiModelProperty(value = "Any extra metadata properties of the database")
    private Map<String, String> metadata;
    @ApiModelProperty(value = "URI of the database. Only applies to certain data sources like hive, S3")
    private String uri;

    // Custom serialization: ObjectNode is not Serializable, so the JSON tree is
    // read/written manually via the BaseDto helpers.
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        definitionMetadata = deserializeObjectNode(in);
    }

    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, definitionMetadata);
    }
}
| 9,673 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasMetadata.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import java.io.Serializable;
/**
 * Marker interface for objects with metadata. Extends {@link Serializable} so
 * that implementing DTOs can cross serialization boundaries.
 */
public interface HasMetadata extends Serializable {
}
| 9,674 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/ViewDto.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
/**
 * Hive Virtual View Dto information. Holds the original and expanded query
 * text of a Hive virtual view.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class ViewDto extends BaseDto {
    private static final long serialVersionUID = -1044988220491063480L;
    @ApiModelProperty(value = "View original text.", required = true)
    private String viewOriginalText;
    @ApiModelProperty(value = "View expanded text.")
    private String viewExpandedText;
}
| 9,675 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/DefinitionMetadataDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * Definition metadata DTO. Associates a free-form JSON metadata document with
 * the qualified name of the entity it describes.
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class DefinitionMetadataDto extends BaseDto implements HasDefinitionMetadata {
    private static final long serialVersionUID = 3826462875655878L;
    // Qualified name of the entity this definition metadata belongs to.
    private QualifiedName name;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata")
    @JsonProperty
    private transient ObjectNode definitionMetadata;

    // Custom serialization: ObjectNode is not Serializable, so the JSON tree is
    // read/written manually via the BaseDto helpers.
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        definitionMetadata = deserializeObjectNode(in);
    }

    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, definitionMetadata);
    }

    /**
     * Returns the qualified name this metadata is defined for.
     *
     * @return the owning entity's qualified name
     */
    @Override
    @JsonIgnore
    public QualifiedName getDefinitionName() {
        return name;
    }
}
| 9,676 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* Database information.
*/
@ApiModel(description = "Tables and other information about the given database")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
public class DatabaseDto extends BaseDto implements HasDefinitionMetadata {
private static final long serialVersionUID = -4530516372664788451L;
private Date dateCreated;
// Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
@ApiModelProperty(value = "metadata attached to the logical database")
@JsonProperty
private transient ObjectNode definitionMetadata;
private Date lastUpdated;
@ApiModelProperty(value = "the name of this entity", required = true)
@JsonProperty
private QualifiedName name;
@ApiModelProperty(value = "Names of the tables in this database", required = true)
private List<String> tables;
@ApiModelProperty(value = "Connector type of this catalog", required = true)
private String type;
@ApiModelProperty(value = "Any extra metadata properties of the database")
private Map<String, String> metadata;
@ApiModelProperty(value = "URI of the database. Only applies to certain data sources like hive, S3")
private String uri;
@JsonIgnore
public QualifiedName getDefinitionName() {
return name;
}
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
definitionMetadata = deserializeObjectNode(in);
}
private void writeObject(final ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
serializeObjectNode(out, definitionMetadata);
}
}
| 9,677 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/GetPartitionsRequestDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Partition get request. Filters and options controlling which partitions of a
 * table are returned.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class GetPartitionsRequestDto extends BaseDto {
    // Partition filter expression.
    private String filter;
    // Explicit list of partition names to fetch.
    private List<String> partitionNames;
    // When true, include full partition details in the response.
    private Boolean includePartitionDetails = false;
    /*
    For audit tables with wap patterns:
    if true, getPartitionsForRequest will only return the audit table's
    own partitions. Metacat does not interpret this flag for regular
    tables, i.e. they should always return their own partitions.
    */
    private Boolean includeAuditOnly = false;
}
| 9,678 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/StorageDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * Storage DTO. Serialization/deserialization metadata and location of the
 * table data, e.g.:
 * <pre>
 * {
 *     "inputFormat": "org.apache.hadoop.mapred.TextInputFormat",
 *     "outputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
 *     "serializationLib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
 *     "parameters": {
 *         "serialization.format": "1"
 *     },
 *     "owner": "charsmith"
 * }
 * </pre>
 */
@ApiModel(description = "Serialization/Deserialization metadata of the table data")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class StorageDto extends BaseDto {
    private static final long serialVersionUID = 4933906340321707232L;
    @ApiModelProperty(value = "Input format of the table data stored")
    private String inputFormat;
    @ApiModelProperty(value = "Output format of the table data stored")
    private String outputFormat;
    @ApiModelProperty(value = "Owner of the table")
    private String owner;
    @ApiModelProperty(value = "Extra storage parameters")
    private Map<String, String> parameters;
    // NOTE(review): swagger description duplicates the one on "parameters" above;
    // likely intended to read "Extra serde parameters" — confirm before changing
    // the published API documentation string.
    @ApiModelProperty(value = "Extra storage parameters")
    private Map<String, String> serdeInfoParameters;
    @ApiModelProperty(value = "Serialization library of the data")
    private String serializationLib;
    @ApiModelProperty(value = "URI of the table. Only applies to certain data sources like hive, S3")
    private String uri;
}
| 9,679 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveRequestDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import io.swagger.annotations.ApiModelProperty;
import io.swagger.annotations.ApiParam;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.List;
/**
 * Partition save request. Carries the partitions to add/update, the names of
 * partitions to delete, and flags controlling existence checks and
 * metadata-only saves.
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class PartitionsSaveRequestDto extends BaseDto {
    private static final long serialVersionUID = -5922699691074685961L;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata attached to this table")
    @JsonProperty
    private transient ObjectNode dataMetadata;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata attached to the physical data")
    @JsonProperty
    private transient ObjectNode definitionMetadata;
    // List of partitions
    @ApiParam(value = "List of partitions", required = true)
    private List<PartitionDto> partitions;
    // List of partition ids/names for deletes
    private List<String> partitionIdsForDeletes;
    // If true, we check if partition exists and drop it before adding it back. If false, we do not check and just add.
    private Boolean checkIfExists = true;
    // If true, we alter if partition exists. If checkIfExists=false, then this is false too.
    private Boolean alterIfExists = false;
    // If true, metacat will only update metadata. For optimization purpose, metacat skips the partition validation
    // and other flags settings, such as checkIfExists, for partition operations.
    private Boolean saveMetadataOnly = false;

    // Custom serialization: ObjectNode is not Serializable, so both JSON trees are
    // read/written manually (in the same order) via the BaseDto helpers.
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        dataMetadata = deserializeObjectNode(in);
        definitionMetadata = deserializeObjectNode(in);
    }

    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, dataMetadata);
        serializeObjectNode(out, definitionMetadata);
    }

    /**
     * {@inheritDoc}
     *
     * <p>Fix: {@code saveMetadataOnly} was previously omitted from the output,
     * so logs did not reflect the complete request state.</p>
     */
    @Override
    public String toString() {
        return "PartitionsSaveRequestDto{" + "dataMetadata=" + dataMetadata + ", definitionMetadata="
            + definitionMetadata + ", partitions=" + partitions + ", partitionIdsForDeletes="
            + partitionIdsForDeletes + ", checkIfExists=" + checkIfExists + ", alterIfExists=" + alterIfExists
            + ", saveMetadataOnly=" + saveMetadataOnly + '}';
    }
}
| 9,680 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/TagRemoveRequestDto.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Tag Remove Request Dto. Request payload for removing some or all tags from
 * the entity identified by {@code name}.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class TagRemoveRequestDto extends BaseDto {
    private static final long serialVersionUID = 8698531483258796673L;
    @ApiModelProperty(value = "The qualified name", required = true)
    private QualifiedName name;
    // NOTE(review): presumably when deleteAll is true the tags list is ignored —
    // confirm against the service-layer handling of this request.
    @ApiModelProperty(value = "True to delete all tags")
    private Boolean deleteAll;
    @ApiModelProperty(value = "Tags to remove")
    private List<String> tags;
}
| 9,681 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/SortOrder.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
/**
 * Sort order. Direction applied when sorting a result list.
 */
public enum SortOrder {
    /**
     * Ascending order.
     */
    ASC,

    /**
     * Descending order.
     */
    DESC
}
| 9,682 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveResponseDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import java.util.ArrayList;
import java.util.List;
/**
 * Partition save response. Reports which partitions were added and which were
 * updated by a save request.
 */
public class PartitionsSaveResponseDto extends BaseDto {
    /**
     * List of added partition names.
     */
    private List<String> added;
    /**
     * List of updated partition names.
     */
    private List<String> updated;

    /**
     * Default constructor; starts with empty result lists.
     */
    public PartitionsSaveResponseDto() {
        this.added = new ArrayList<>();
        this.updated = new ArrayList<>();
    }

    public List<String> getAdded() {
        return this.added;
    }

    /**
     * Sets list of added partition names. A null argument is ignored so the
     * list is never null.
     *
     * @param added list of added partition names
     */
    public void setAdded(final List<String> added) {
        if (added == null) {
            return;
        }
        this.added = added;
    }

    public List<String> getUpdated() {
        return this.updated;
    }

    /**
     * Sets list of updated partition names. A null argument is ignored so the
     * list is never null.
     *
     * @param updated list of updated partition names
     */
    public void setUpdated(final List<String> updated) {
        if (updated == null) {
            return;
        }
        this.updated = updated;
    }
}
| 9,683 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.List;
import java.util.Map;
/**
* Information about a catalog.
*/
@ApiModel(description = "Information about a catalog")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
public class CatalogDto extends BaseDto implements HasDefinitionMetadata {
private static final long serialVersionUID = -5713826608609231492L;
@ApiModelProperty(value = "a list of the names of the databases that belong to this catalog", required = true)
private List<String> databases;
// Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
@ApiModelProperty(value = "metadata attached to the logical catalog")
@JsonProperty
private transient ObjectNode definitionMetadata;
@ApiModelProperty(value = "the name of this entity", required = true)
@JsonProperty
private QualifiedName name;
@ApiModelProperty(value = "the type of the connector of this catalog", required = true)
private String type;
@ApiModelProperty(value = "Cluster information referred to by this catalog", required = true)
@JsonProperty
private ClusterDto cluster;
@JsonProperty
private Map<String, String> metadata;
@JsonIgnore
public QualifiedName getDefinitionName() {
return name;
}
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
definitionMetadata = deserializeObjectNode(in);
}
private void writeObject(final ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
serializeObjectNode(out, definitionMetadata);
}
}
| 9,684 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDataMetadata.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.databind.node.ObjectNode;
import javax.annotation.Nonnull;
/**
 * Marker interface for objects with data metadata. Implementations expose a
 * free-form JSON metadata document together with the uri of the data it
 * describes.
 */
public interface HasDataMetadata extends HasMetadata {
    /**
     * Returns data metadata.
     *
     * @return data metadata as a JSON object tree
     */
    ObjectNode getDataMetadata();

    /**
     * Sets the data metadata json.
     *
     * @param metadata data metadata json
     */
    void setDataMetadata(ObjectNode metadata);

    /**
     * Returns uri.
     *
     * @return The uri that points to the location of the external data.
     * @throws IllegalStateException if this instance does not have external data
     */
    @Nonnull
    String getDataUri();

    /**
     * Returns true if this particular instance points to external data.
     *
     * @return true if this particular instance points to external data
     */
    boolean isDataExternal();
}
| 9,685 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/ResolveByUriRequestDto.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import lombok.Data;
import lombok.EqualsAndHashCode;
/**
 * Request body for resolving which entities reference a given data uri.
 *
 * @author zhenl
 * @since 1.0.0
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class ResolveByUriRequestDto extends BaseDto {
    private static final long serialVersionUID = -2649784382533439526L;
    /** The data uri to resolve. */
    private String uri;
}
| 9,686 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/Pageable.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import lombok.Data;
/**
 * Holds the pagination parameters (page size and starting offset) of a list
 * request.
 *
 * @author amajumdar
 */
@Data
public class Pageable {
    private Integer limit;
    private Integer offset;

    /**
     * Default constructor.
     */
    public Pageable() {
    }

    /**
     * Creates a pagination request.
     *
     * @param limit  size of the list
     * @param offset offset of the list
     */
    public Pageable(final Integer limit, final Integer offset) {
        this.limit = limit;
        this.offset = offset;
    }

    /**
     * Returns the starting offset, defaulting to zero when none was supplied.
     *
     * @return the offset, never null
     */
    public Integer getOffset() {
        if (offset == null) {
            return 0;
        }
        return offset;
    }

    /**
     * Whether pagination was requested at all.
     *
     * @return true when a limit has been set
     */
    public boolean isPageable() {
        return null != limit;
    }
}
| 9,687 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDefinitionMetadata.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
/**
 * Interface for objects that carry definition (logical) metadata.
 */
public interface HasDefinitionMetadata extends HasMetadata {
    /**
     * Returns definition metadata.
     *
     * @return definition metadata
     */
    ObjectNode getDefinitionMetadata();

    /**
     * Sets definition metadata.
     *
     * @param metadata definition metadata
     */
    void setDefinitionMetadata(ObjectNode metadata);

    /**
     * Returns the qualified name under which the definition metadata is stored.
     *
     * @return qualified name
     */
    QualifiedName getDefinitionName();
}
| 9,688 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataGetRequestDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import lombok.Data;
import lombok.EqualsAndHashCode;
/**
 * Request body for fetching data metadata by uri.
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class DataMetadataGetRequestDto extends BaseDto {
    /** Uri whose data metadata should be returned. */
    private String uri;
}
| 9,689 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/CreateCatalogDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * Information required to create a new catalog.
 */
@ApiModel(description = "Information required to create a new catalog")
@Data
@EqualsAndHashCode(callSuper = false)
public class CreateCatalogDto extends BaseDto implements HasDefinitionMetadata {
    private static final long serialVersionUID = -6745573078608938941L;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata attached to the logical catalog")
    @JsonProperty
    private transient ObjectNode definitionMetadata;
    @ApiModelProperty(value = "the name of this entity", required = true)
    @JsonProperty
    private QualifiedName name;
    @ApiModelProperty(value = "the type of the connector of this catalog", required = true)
    private String type;

    /**
     * {@inheritDoc} The catalog's qualified name is used as the definition name.
     */
    @Override
    @JsonIgnore
    public QualifiedName getDefinitionName() {
        return name;
    }

    // Custom deserialization: the transient definitionMetadata field is read
    // back manually after the default fields (see writeObject for the order).
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        definitionMetadata = deserializeObjectNode(in);
    }

    // Custom serialization counterpart of readObject: writes the default
    // fields first, then definitionMetadata.
    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, definitionMetadata);
    }
}
| 9,690 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/AuditDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.Date;
/**
 * Audit information: who created and last modified an entity, and when.
 */
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class AuditDto extends BaseDto {
    private static final long serialVersionUID = 9221109874202093789L;
    /* Created By */
    @ApiModelProperty(value = "User name who created the table")
    private String createdBy;
    /* Created date */
    @ApiModelProperty(value = "Creation date")
    private Date createdDate;
    /* Last modified by */
    @ApiModelProperty(value = "User name who last modified the table")
    private String lastModifiedBy;
    /* Last modified date */
    @ApiModelProperty(value = "Last modified date")
    private Date lastModifiedDate;
}
| 9,691 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Map;
/**
 * DTO describing a single table partition: its storage/serde information plus
 * any attached physical (data) and logical (definition) metadata.
 */
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class PartitionDto extends BaseDto implements HasDataMetadata, HasDefinitionMetadata {
    private static final long serialVersionUID = 783462697901395508L;
    @ApiModelProperty(value = "audit information about the partition")
    private AuditDto audit;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "Physical metadata: metadata about the physical data referred by the partition.")
    @JsonProperty
    private transient ObjectNode dataMetadata;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "Logical metadata: metadata about the logical construct of the partition.")
    @JsonProperty
    private transient ObjectNode definitionMetadata;
    @ApiModelProperty(value = "the name of this entity", required = true)
    @JsonProperty
    private QualifiedName name;
    @ApiModelProperty(value = "Storage/Serialization/Deserialization info of the partition ")
    private StorageDto serde;
    @ApiModelProperty(value = "Any extra metadata properties of the partition")
    private Map<String, String> metadata;

    /**
     * Returns the uri of the external data backing this partition.
     *
     * @return the non-empty data uri
     * @throws IllegalStateException when no serde or uri is present
     */
    @Nonnull
    @Override
    @JsonIgnore
    public String getDataUri() {
        String uri = null;
        if (serde != null) {
            uri = serde.getUri();
        }
        if (uri == null || uri.isEmpty()) {
            throw new IllegalStateException("This instance does not have external data");
        }
        return uri;
    }

    /**
     * Returns the qualified name under which definition metadata is stored.
     *
     * @return the partition's qualified name
     */
    @JsonIgnore
    public QualifiedName getDefinitionName() {
        return name;
    }

    /**
     * Whether this partition points at external data, i.e. carries a serde
     * with a non-empty uri.
     *
     * @return true when a non-empty data uri is present
     */
    @Override
    @JsonProperty
    public boolean isDataExternal() {
        if (serde == null) {
            return false;
        }
        final String uri = serde.getUri();
        return uri != null && !uri.isEmpty();
    }

    /**
     * No-op setter; the dataExternal property is derived from the serde (see
     * {@link #isDataExternal()}), presumably so binding the serialized
     * property is harmless — confirm with the Jackson mapping if changing.
     *
     * @param ignored is data external
     */
    @SuppressWarnings("EmptyMethod")
    public void setDataExternal(final boolean ignored) {
    }

    // ObjectNode is not Serializable: the two transient metadata fields are
    // read back manually, in exactly the order writeObject emits them.
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        dataMetadata = deserializeObjectNode(in);
        definitionMetadata = deserializeObjectNode(in);
    }

    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, dataMetadata);
        serializeObjectNode(out, definitionMetadata);
    }
}
| 9,692 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/BaseDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
/**
 * Base class for all common DTOs.
 * <p>
 * Provides the Jackson-backed helpers subclasses use to manually
 * (de)serialize transient {@code ObjectNode} fields in their custom
 * readObject/writeObject methods, plus a JSON-based toString.
 *
 * @author amajumdar
 */
//TODO: All DTO's should be READ-ONLY
public abstract class BaseDto implements Serializable {
    /** Shared locator used for all JSON (de)serialization in DTOs. */
    protected static final MetacatJsonLocator METACAT_JSON_LOCATOR = new MetacatJsonLocator();

    /**
     * Deserialize the input stream.
     *
     * @param inputStream input stream
     * @return Json node, or null if none was written
     * @throws IOException exception deserializing the stream
     */
    @Nullable
    public static ObjectNode deserializeObjectNode(
        @Nonnull @NonNull final ObjectInputStream inputStream
    ) throws IOException {
        return METACAT_JSON_LOCATOR.deserializeObjectNode(inputStream);
    }

    /**
     * Serialize the stream.
     *
     * @param outputStream output stream
     * @param json         Json Node, may be null
     * @throws IOException exception serializing the json
     */
    public static void serializeObjectNode(
        @Nonnull @NonNull final ObjectOutputStream outputStream,
        @Nullable final ObjectNode json
    ) throws IOException {
        METACAT_JSON_LOCATOR.serializeObjectNode(outputStream, json);
    }

    /**
     * {@inheritDoc} Renders this DTO as its JSON representation.
     */
    @Override
    public String toString() {
        return METACAT_JSON_LOCATOR.toJsonString(this);
    }
}
| 9,693 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/ClusterDto.java
|
package com.netflix.metacat.common.dto;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
 * Catalog cluster information: the name, type, account, environment and
 * region of the cluster hosting a catalog.
 *
 * @author rveeramacheneni
 * @since 1.3.0
 */
@ApiModel(description = "Information about the catalog cluster")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ClusterDto implements Serializable {
    private static final long serialVersionUID = 3575620733293405903L;
    /** Name of the cluster. */
    @ApiModelProperty(value = "the cluster hosting this catalog", required = false)
    private String name;
    /** Type of the cluster. */
    @ApiModelProperty(value = "the type of the cluster", required = true)
    private String type;
    /** Name of the account under which the cluster was created. Ex: "abc_test" */
    @ApiModelProperty(value = "Name of the account under which the cluster was created.", required = false)
    private String account;
    /** Id of the Account under which the cluster was created. Ex: "abc_test" */
    @ApiModelProperty(value = "Id of the Account under which the cluster was created", required = false)
    private String accountId;
    /** Environment under which the cluster exists. Ex: "prod", "test" */
    @ApiModelProperty(value = "the environment in which the cluster exists", required = false)
    private String env;
    /** Region in which the cluster exists. Ex: "us-east-1" */
    @ApiModelProperty(value = "the region of this cluster", required = false)
    private String region;
}
| 9,694 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/TableDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Table DTO: the table's fields, storage/serde information, audit trail and
 * any attached physical (data) and logical (definition) metadata.
 */
@ApiModel(description = "Table metadata")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class TableDto extends BaseDto implements HasDataMetadata, HasDefinitionMetadata {
    private static final long serialVersionUID = 5922768252406041451L;
    @ApiModelProperty(value = "Contains information about table changes")
    private AuditDto audit;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata attached to the physical data")
    @JsonProperty
    private transient ObjectNode dataMetadata;
    // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it
    @ApiModelProperty(value = "metadata attached to the logical table")
    @JsonProperty
    private transient ObjectNode definitionMetadata;
    private List<FieldDto> fields;
    @ApiModelProperty(value = "Any extra metadata properties of the database table")
    private Map<String, String> metadata;
    @ApiModelProperty(value = "the name of this entity", required = true)
    @JsonProperty
    private QualifiedName name;
    @ApiModelProperty(value = "serialization/deserialization info about the table")
    private StorageDto serde;
    @ApiModelProperty(value = "Hive virtual view info.")
    //Naming as view required by dozer mapping
    private ViewDto view;

    /**
     * Returns the uri of the external data backing this table.
     *
     * @return the non-empty data uri
     * @throws IllegalStateException when no serde or uri is present
     */
    @Nonnull
    @Override
    @JsonIgnore
    public String getDataUri() {
        final String uri = serde != null ? serde.getUri() : null;
        if (uri == null || uri.isEmpty()) {
            throw new IllegalStateException("This instance does not have external data");
        }
        return uri;
    }

    /**
     * Returns the qualified name under which definition metadata is stored.
     *
     * @return the table's qualified name
     */
    @JsonIgnore
    public QualifiedName getDefinitionName() {
        return name;
    }

    /**
     * Extracts a text field from the "owner" node of the definition metadata.
     *
     * @param fieldName name of the field under the "owner" node
     * @return the field's text value, or empty when absent
     */
    private Optional<String> getOwnerField(final String fieldName) {
        return Optional.ofNullable(definitionMetadata)
            .map(definitionMetadataJson -> definitionMetadataJson.get("owner"))
            .map(ownerJson -> ownerJson.get(fieldName))
            .map(JsonNode::textValue);
    }

    /**
     * Returns the owning user id from the definition metadata, if any.
     *
     * @return the owner's userId, or empty when absent
     */
    @JsonIgnore
    public Optional<String> getTableOwner() {
        return getOwnerField("userId");
    }

    /**
     * Returns the owning google group from the definition metadata, if any.
     *
     * @return the owner's google_group, or empty when absent
     */
    @JsonIgnore
    public Optional<String> getTableOwnerGroup() {
        return getOwnerField("google_group");
    }

    /**
     * Returns the list of partition keys.
     * Mirrors {@code fields}: null when fields is null, empty when no fields.
     *
     * @return list of partition keys
     */
    @ApiModelProperty(value = "List of partition key names")
    @JsonProperty
    @SuppressWarnings("checkstyle:methodname")
    public List<String> getPartition_keys() {
        if (fields == null) {
            return null;
        } else if (fields.isEmpty()) {
            return Collections.emptyList();
        }
        // Presize to the upper bound; only partition-key fields are kept.
        final List<String> keys = new ArrayList<>(fields.size());
        for (final FieldDto field : fields) {
            if (field.isPartition_key()) {
                keys.add(field.getName());
            }
        }
        return keys;
    }

    /**
     * No-op setter; partition keys are derived from {@code fields}. Kept so
     * the serialized partition_keys property can be bound without error.
     *
     * @param ignored list of partition keys
     */
    @SuppressWarnings({"EmptyMethod", "checkstyle:methodname"})
    public void setPartition_keys(final List<String> ignored) {
    }

    /**
     * Whether this table points at external data, i.e. carries a serde with a
     * non-empty uri.
     *
     * @return true when a non-empty data uri is present
     */
    @Override
    @JsonProperty
    public boolean isDataExternal() {
        return serde != null && serde.getUri() != null && !serde.getUri().isEmpty();
    }

    /**
     * No-op setter; the dataExternal property is derived from the serde.
     *
     * @param ignored is data external
     */
    @SuppressWarnings("EmptyMethod")
    public void setDataExternal(final boolean ignored) {
    }

    // ObjectNode is not Serializable: the two transient metadata fields are
    // read back manually, in exactly the order writeObject emits them.
    private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        dataMetadata = deserializeObjectNode(in);
        definitionMetadata = deserializeObjectNode(in);
    }

    private void writeObject(final ObjectOutputStream out) throws IOException {
        out.defaultWriteObject();
        serializeObjectNode(out, dataMetadata);
        serializeObjectNode(out, definitionMetadata);
    }
}
| 9,695 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/ResolveByUriResponseDto.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.netflix.metacat.common.QualifiedName;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.util.List;
/**
 * Response listing the tables and partitions that resolve to a requested uri.
 *
 * @author zhenl
 * @since 1.0.0
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class ResolveByUriResponseDto extends BaseDto {
    private static final long serialVersionUID = -4505346090786555046L;
    /** Qualified names of the matching tables. */
    private List<QualifiedName> tables;
    /** Qualified names of the matching partitions. */
    private List<QualifiedName> partitions;
}
| 9,696 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Package containing the common DTOs used between the Metacat client and server.
*
* @author amajumdar
*/
package com.netflix.metacat.common.dto;
| 9,697 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogMappingDto.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
/**
 * The name and type of a catalog, plus the cluster that hosts it.
 */
@ApiModel(description = "The name and type of a catalog")
@SuppressWarnings("unused")
@Data
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class CatalogMappingDto extends BaseDto {
    private static final long serialVersionUID = -1223516438943164936L;
    /** Name of the catalog. */
    @ApiModelProperty(value = "The name of the catalog", required = true)
    private String catalogName;
    /** Type of the connector backing the catalog. */
    @ApiModelProperty(value = "The connector type of the catalog", required = true)
    private String connectorName;
    /** Cluster information referred by this catalog. */
    @ApiModelProperty(value = "Cluster information referred by this catalog", required = true)
    @JsonProperty
    private ClusterDto clusterDto;
}
| 9,698 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/TagCreateRequestDto.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto;
import com.netflix.metacat.common.QualifiedName;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Request body for inserting tags on a qualified name.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class TagCreateRequestDto extends BaseDto {
    private static final long serialVersionUID = -990374882621118670L;
    /** The entity the tags apply to. */
    @ApiModelProperty(value = "The qualified name", required = true)
    private QualifiedName name;
    /** The tag values to insert. */
    @ApiModelProperty(value = "Tags to insert")
    private List<String> tags;
}
| 9,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.