index
int64 0
0
| repo_id
stringlengths 9
205
| file_path
stringlengths 31
246
| content
stringlengths 1
12.2M
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/ApiConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.netflix.metacat.main.api.ApiFilter;
import com.netflix.metacat.main.api.MetacatErrorController;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.web.ServerProperties;
import org.springframework.boot.web.error.ErrorAttributeOptions;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.boot.web.servlet.error.DefaultErrorAttributes;
import org.springframework.boot.web.servlet.error.ErrorAttributes;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.context.request.WebRequest;
import org.springframework.web.servlet.config.annotation.ContentNegotiationConfigurer;
import org.springframework.web.servlet.config.annotation.PathMatchConfigurer;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;
import java.util.Map;
/**
* Spring configuration for the API tier.
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
public class ApiConfig implements WebMvcConfigurer {
    /**
     * {@inheritDoc}
     * <p>
     * Turn off suffix-based content negotiation. A table name may end in what looks like a file
     * extension (e.g. {@code .knp}), which Spring would otherwise interpret as a media type and
     * reject the request.
     *
     * @see <a href="https://stackoverflow.com/questions/30793717">Stack Overflow Issue</a>
     */
    @Override
    public void configureContentNegotiation(final ContentNegotiationConfigurer configurer) {
        configurer.favorPathExtension(false);
    }

    /**
     * {@inheritDoc}
     * <p>
     * Turn off {@literal .} recognition in paths. Needed due to table's name potentially having
     * '.' as a character.
     *
     * @see <a href="https://docs.spring.io/spring/docs/current/spring-framework-reference/html/mvc.html">SpringDoc</a>
     */
    @Override
    public void configurePathMatch(final PathMatchConfigurer configurer) {
        configurer.setUseSuffixPatternMatch(false);
    }

    /**
     * The rest filter registration bean. Applies the {@link ApiFilter} to every request
     * under the {@code /mds} prefix.
     *
     * @param apiFilter the api filter
     * @return The rest filter
     */
    @Bean
    public FilterRegistrationBean metacatApiFilter(final ApiFilter apiFilter) {
        final FilterRegistrationBean registrationBean = new FilterRegistrationBean();
        registrationBean.setFilter(apiFilter);
        registrationBean.addUrlPatterns("/mds/*");
        return registrationBean;
    }

    /**
     * The API filter. Only created when no other {@link ApiFilter} bean exists, so
     * applications can supply their own implementation.
     *
     * @return the API filter.
     */
    @Bean
    @ConditionalOnMissingBean(ApiFilter.class)
    public ApiFilter apiFilter() {
        return new ApiFilter();
    }

    /**
     * Override the default error attributes for backwards compatibility with older clients.
     *
     * @return New set of error attributes with 'message' copied into 'error'
     */
    @Bean
    public ErrorAttributes errorAttributes() {
        return new DefaultErrorAttributes() {
            /**
             * {@inheritDoc}
             */
            @Override
            public Map<String, Object> getErrorAttributes(final WebRequest webRequest,
                                                          final ErrorAttributeOptions options) {
                final Map<String, Object> errorAttributes
                    = super.getErrorAttributes(webRequest, options);
                // Older clients read the 'error' field; mirror 'message' into it.
                errorAttributes.put("error", errorAttributes.get("message"));
                return errorAttributes;
            }
        };
    }

    /**
     * Returns the error controller.
     *
     * @param errorAttributes error attributes
     * @param serverProperties server properties
     * @return error controller
     */
    @Bean
    public MetacatErrorController metacatErrorController(final ErrorAttributes errorAttributes,
                                                         final ServerProperties serverProperties) {
        return new MetacatErrorController(errorAttributes, serverProperties.getError());
    }
}
| 2,100 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/SNSNotificationsConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.sns.AmazonSNS;
import com.amazonaws.services.sns.AmazonSNSClient;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.main.services.notifications.sns.SNSNotificationMetric;
import com.netflix.metacat.main.services.notifications.sns.SNSNotificationServiceImpl;
import com.netflix.metacat.main.services.notifications.sns.SNSNotificationServiceUtil;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Spring configuration for SNS Notifications.
*
* @author tgianos
* @since 1.1.0
*/
@Slf4j
@Configuration
@ConditionalOnProperty(value = "metacat.notifications.sns.enabled", havingValue = "true")
public class SNSNotificationsConfig {
    /**
     * If SNS notifications are desired and no existing client has been created elsewhere
     * in the application create a default client here.
     *
     * @return The configured SNS client
     */
    //TODO: See what spring-cloud-aws would provide automatically...
    @Bean
    @ConditionalOnMissingBean(AmazonSNS.class)
    public AmazonSNS amazonSNS() {
        return new AmazonSNSClient(DefaultAWSCredentialsProviderChain.getInstance());
    }

    /**
     * SNS Notification Publisher.
     *
     * @param amazonSNS The SNS client to use
     * @param config The system configuration abstraction to use
     * @param objectMapper The object mapper to use
     * @param snsNotificationMetric The sns notification metric
     * @param snsNotificationServiceUtil The SNS notification util
     * @return Configured Notification Service bean
     */
    @Bean
    public SNSNotificationServiceImpl snsNotificationService(
        final AmazonSNS amazonSNS,
        final Config config,
        final ObjectMapper objectMapper,
        final SNSNotificationMetric snsNotificationMetric,
        final SNSNotificationServiceUtil snsNotificationServiceUtil
    ) {
        // Both topic ARNs are mandatory once SNS notifications are switched on.
        final String tableArn = requireArn(config.getSnsTopicTableArn(), "table");
        final String partitionArn = requireArn(config.getSnsTopicPartitionArn(), "partition");
        log.info("SNS notifications are enabled. Creating SNSNotificationServiceImpl bean.");
        return new SNSNotificationServiceImpl(amazonSNS,
            tableArn, partitionArn, objectMapper, config, snsNotificationMetric, snsNotificationServiceUtil);
    }

    /**
     * SNS Notification Service Util.
     *
     * @param userMetadataService user metadata service
     * @return SNSNotificationServiceUtil
     */
    @Bean
    public SNSNotificationServiceUtil snsNotificationServiceUtil(
        final UserMetadataService userMetadataService
    ) {
        return new SNSNotificationServiceUtil(userMetadataService);
    }

    /**
     * SNS Notification Metric.
     *
     * @param registry registry for spectator
     * @return Notification Metric bean
     */
    @Bean
    public SNSNotificationMetric snsNotificationMetric(
        final Registry registry
    ) {
        return new SNSNotificationMetric(registry);
    }

    /**
     * Validates that the given topic ARN was configured.
     *
     * @param arn the configured ARN value (may be empty)
     * @param kind which topic this ARN belongs to ("table" or "partition"), used in the error text
     * @return the ARN when present
     * @throws IllegalStateException when the ARN is missing
     */
    private static String requireArn(final String arn, final String kind) {
        if (StringUtils.isEmpty(arn)) {
            throw new IllegalStateException(
                "SNS Notifications are enabled but no " + kind + " ARN provided. Unable to configure."
            );
        }
        return arn;
    }
}
| 2,101 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/ElasticSearchConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.google.common.base.Splitter;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.metacat.main.services.search.ElasticSearchCatalogTraversalAction;
import com.netflix.metacat.main.services.search.ElasticSearchEventHandlers;
import com.netflix.metacat.main.services.search.ElasticSearchRefresh;
import com.netflix.metacat.main.services.search.ElasticSearchUtil;
import com.netflix.metacat.main.services.search.ElasticSearchUtilImpl;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
* Configuration for ElasticSearch which triggers when metacat.elasticsearch.enabled is true.
*
* @author tgianos
* @author zhenl
* @since 1.1.0
*/
@Slf4j
@Configuration
@ConditionalOnProperty(value = "metacat.elasticsearch.enabled", havingValue = "true")
public class ElasticSearchConfig {
    /**
     * The ElasticSearch client.
     *
     * @param config System config
     * @return Configured client or error
     * @throws IllegalStateException when no cluster name is configured or no reachable
     *                               cluster nodes could be added
     */
    @Bean
    @ConditionalOnMissingBean(Client.class)
    public Client elasticSearchClient(final Config config) {
        final String clusterName = config.getElasticSearchClusterName();
        if (StringUtils.isBlank(clusterName)) {
            throw new IllegalStateException("No cluster name set. Unable to continue");
        }
        final Settings settings = Settings.builder()
            .put("cluster.name", clusterName)
            .put("client.transport.sniff", true) //to dynamically add new hosts and remove old ones
            .put("transport.tcp.connect_timeout", "60s")
            .build();
        final TransportClient client = new PreBuiltTransportClient(settings);
        // Add the transport address if exists
        final String clusterNodesStr = config.getElasticSearchClusterNodes();
        if (StringUtils.isNotBlank(clusterNodesStr)) {
            final int port = config.getElasticSearchClusterPort();
            for (final String clusterNode : Splitter.on(',').split(clusterNodesStr)) {
                try {
                    client.addTransportAddress(
                        new InetSocketTransportAddress(InetAddress.getByName(clusterNode), port)
                    );
                } catch (final UnknownHostException exception) {
                    // Best effort: a single bad host should not prevent startup if others resolve.
                    log.error("Skipping unknown host {}", clusterNode);
                }
            }
        }
        if (client.transportAddresses().isEmpty()) {
            // Close the client before failing, otherwise its threads/connections leak.
            client.close();
            throw new IllegalStateException("No Elasticsearch cluster nodes added. Unable to create client.");
        }
        return client;
    }

    /**
     * ElasticSearch utility wrapper.
     *
     * @param client The configured ElasticSearch client
     * @param config System config
     * @param metacatJson JSON utilities
     * @param registry spectator registry
     * @return The ElasticSearch utility instance
     */
    @Bean
    public ElasticSearchUtil elasticSearchUtil(
        final Client client,
        final Config config,
        final MetacatJson metacatJson,
        final Registry registry
    ) {
        return new ElasticSearchUtilImpl(client, config, metacatJson, registry);
    }

    /**
     * Event handler instance to publish event payloads to ElasticSearch.
     *
     * @param elasticSearchUtil The client wrapper utility to use
     * @param registry registry of spectator
     * @param config System config
     * @return The event handler instance
     */
    @Bean
    public ElasticSearchEventHandlers elasticSearchEventHandlers(
        final ElasticSearchUtil elasticSearchUtil,
        final Registry registry,
        final Config config
    ) {
        return new ElasticSearchEventHandlers(elasticSearchUtil, registry, config);
    }

    /**
     * The refresher of ElasticSearch.
     *
     * @param config System config
     * @param eventBus Event bus
     * @param catalogService Catalog service
     * @param databaseService Database service
     * @param tableService Table service
     * @param partitionService Partition service
     * @param userMetadataService User metadata service
     * @param tagService Tag service
     * @param elasticSearchUtil ElasticSearch client wrapper
     * @param registry registry of spectator
     * @return The refresh bean
     */
    @Bean
    public ElasticSearchRefresh elasticSearchRefresh(
        final Config config,
        final MetacatEventBus eventBus,
        final CatalogService catalogService,
        final DatabaseService databaseService,
        final TableService tableService,
        final PartitionService partitionService,
        final UserMetadataService userMetadataService,
        final TagService tagService,
        final ElasticSearchUtil elasticSearchUtil,
        final Registry registry
    ) {
        return new ElasticSearchRefresh(
            config,
            eventBus,
            catalogService,
            databaseService,
            tableService,
            partitionService,
            userMetadataService,
            tagService,
            elasticSearchUtil,
            registry
        );
    }

    /**
     * Traversal action implementation for ElasticSearch refresh.
     *
     * @param config System config
     * @param eventBus Event bus
     * @param databaseService Database service
     * @param tableService Table service
     * @param userMetadataService User metadata service
     * @param tagService Tag service
     * @param elasticSearchUtil ElasticSearch client wrapper
     * @param registry registry of spectator
     * @return The refresh bean
     */
    @Bean
    public ElasticSearchCatalogTraversalAction elasticSearchCatalogTraversalAction(
        final Config config,
        final MetacatEventBus eventBus,
        final DatabaseService databaseService,
        final TableService tableService,
        final UserMetadataService userMetadataService,
        final TagService tagService,
        final ElasticSearchUtil elasticSearchUtil,
        final Registry registry
    ) {
        return new ElasticSearchCatalogTraversalAction(
            config,
            eventBus,
            databaseService,
            tableService,
            userMetadataService,
            tagService,
            elasticSearchUtil,
            registry
        );
    }
}
| 2,102 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
 * Spring configuration classes.
 *
 * @author tgianos
 * @since 1.1.0
 */
// All method parameters in this package are treated as non-null by default
// unless explicitly annotated otherwise (JSR-305 @ParametersAreNonnullByDefault).
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.configs;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,103 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/RequestWrapper.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.exception.MetacatAlreadyExistsException;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.exception.MetacatPreconditionFailedException;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.exception.MetacatUserMetadataException;
import com.netflix.metacat.common.server.api.traffic_control.RequestGateway;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.AliasService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.validation.constraints.NotNull;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
/**
* Request wrapper.
*
* @author amajumdar
* @since 0.1.50
*/
@Slf4j
@Component
public final class RequestWrapper {
    private final Registry registry;
    private final Config config;
    private final AliasService aliasService;
    private final RequestGateway requestGateway;
    //Metrics
    private final Id requestCounterId;
    private final Id requestFailureCounterId;
    private final Id requestTimerId;

    /**
     * Wrapper class for processing the request.
     *
     * @param registry registry
     * @param config Config
     * @param aliasService AliasService
     * @param requestGateway RequestGateway
     */
    @Autowired
    public RequestWrapper(@NotNull @NonNull final Registry registry,
                          @NotNull @NonNull final Config config,
                          @NotNull @NonNull final AliasService aliasService,
                          @NotNull @NonNull final RequestGateway requestGateway) {
        this.registry = registry;
        this.config = config;
        this.aliasService = aliasService;
        this.requestGateway = requestGateway;
        requestCounterId = registry.createId(Metrics.CounterRequestCount.getMetricName());
        requestFailureCounterId = registry.createId(Metrics.CounterRequestFailureCount.getMetricName());
        requestTimerId = registry.createId(Metrics.TimerRequest.getMetricName());
    }

    /**
     * Creates the qualified name, resolving table aliases when alias support is enabled.
     *
     * @param nameSupplier supplier
     * @return name
     * @throws MetacatBadRequestException when the supplied name is invalid
     */
    public QualifiedName qualifyName(final Supplier<QualifiedName> nameSupplier) {
        try {
            final QualifiedName name = nameSupplier.get();
            if (config.isTableAliasEnabled() && name.getType() == QualifiedName.Type.TABLE) {
                return aliasService.getTableName(name);
            }
            return name;
        } catch (Exception e) {
            log.error("Invalid qualified name", e);
            throw new MetacatBadRequestException(e.getMessage());
        }
    }

    /**
     * Request wrapper to process request.
     *
     * @param name name
     * @param resourceRequestName request name
     * @param supplier supplier
     * @param <R> response
     * @return response of supplier
     */
    public <R> R processRequest(
        final QualifiedName name,
        final String resourceRequestName,
        final Supplier<R> supplier) {
        return processRequest(name, resourceRequestName, Collections.emptyMap(), supplier);
    }

    /**
     * Request wrapper to process request. Records request count/failure/timer metrics and
     * translates internal exceptions into the corresponding Metacat API exceptions.
     *
     * @param name name
     * @param resourceRequestName request name
     * @param requestTags tags that needs to be added to the registry
     * @param supplier supplier
     * @param <R> response
     * @return response of supplier
     */
    public <R> R processRequest(
        final QualifiedName name,
        final String resourceRequestName,
        final Map<String, String> requestTags,
        final Supplier<R> supplier) {
        final long start = registry.clock().wallTime();
        final Map<String, String> tags = new HashMap<>(name.parts());
        if (requestTags != null) {
            tags.putAll(requestTags);
        }
        tags.put("request", resourceRequestName);
        tags.put("scheme", MetacatContextManager.getContext().getScheme());
        registry.counter(requestCounterId.withTags(tags)).increment();
        try {
            requestGateway.validateRequest(resourceRequestName, name);
            MetacatContextManager.getContext().setRequestName(resourceRequestName);
            log.info("### Calling method: {} for {}", resourceRequestName, name);
            return supplier.get();
        } catch (UnsupportedOperationException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatNotSupportedException("Catalog does not support the operation. " + e.getMessage());
        } catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatAlreadyExistsException(e.getMessage());
        } catch (NotFoundException | MetacatNotFoundException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatNotFoundException(
                String.format("Unable to locate for %s. Details: %s", name, e.getMessage()));
        } catch (InvalidMetaException | IllegalArgumentException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatBadRequestException(
                String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
        } catch (TablePreconditionFailedException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatPreconditionFailedException(
                String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
        } catch (ConnectorException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(),
                e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
            log.error(message, e);
            // Surface connection-pool exhaustion as a 429 rather than a generic 500.
            for (Throwable ex : Throwables.getCausalChain(e)) {
                // A throwable in the chain may have a null message; guard against NPE.
                final String causeMessage = ex.getMessage();
                if (causeMessage != null
                    && (causeMessage.contains("too many connections")
                    || causeMessage.contains("Timeout: Pool empty"))) {
                    throw new MetacatTooManyRequestsException(causeMessage);
                }
            }
            throw new MetacatException(message, e);
        } catch (UserMetadataServiceException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            final String message = String.format("%s.%s -- %s usermetadata operation failed for %s", e.getMessage(),
                e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
            // Log before rethrowing, consistent with the other handlers above.
            log.error(message, e);
            throw new MetacatUserMetadataException(message);
        } catch (Exception e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(),
                e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
            log.error(message, e);
            if (e instanceof MetacatException) {
                throw e;
            } else {
                throw new MetacatException(message, e);
            }
        } finally {
            final long duration = registry.clock().wallTime() - start;
            log.info("### Time taken to complete {} for {} is {} ms", resourceRequestName, name, duration);
            tryAddTableTypeTag(tags, name);
            this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Adds a lower-cased 'tableType' tag when the request context has recorded one for this name.
     */
    private static void tryAddTableTypeTag(final Map<String, String> tags, final QualifiedName qualifiedName) {
        final MetacatRequestContext context = MetacatContextManager.getContext();
        final String tableType = context.getTableType(qualifiedName);
        if (!StringUtils.isBlank(tableType)) {
            tags.put("tableType", tableType.toLowerCase());
        }
    }

    /**
     * Simple request wrapper to process request.
     *
     * @param resourceRequestName request name
     * @param supplier supplier
     * @param <R> response
     * @return response of the supplier
     */
    public <R> R processRequest(
        final String resourceRequestName,
        final Supplier<R> supplier) {
        final long start = registry.clock().wallTime();
        final Map<String, String> tags = Maps.newHashMap();
        tags.put("request", resourceRequestName);
        registry.counter(requestCounterId.withTags(tags)).increment();
        try {
            MetacatContextManager.getContext().setRequestName(resourceRequestName);
            log.info("### Calling method: {}", resourceRequestName);
            return supplier.get();
        } catch (UnsupportedOperationException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatNotSupportedException("Catalog does not support the operation. " + e.getMessage());
        } catch (IllegalArgumentException e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            log.error(e.getMessage(), e);
            throw new MetacatBadRequestException(String.format("%s.%s", e.getMessage(),
                e.getCause() == null ? "" : e.getCause().getMessage()));
        } catch (Exception e) {
            collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
            final String message = String
                .format("%s.%s -- %s failed.",
                    e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(),
                    resourceRequestName);
            log.error(message, e);
            if (e instanceof MetacatException) {
                throw e;
            } else {
                throw new MetacatException(message, e);
            }
        } finally {
            final long duration = registry.clock().wallTime() - start;
            log.info("### Time taken to complete {} is {} ms", resourceRequestName,
                duration);
            this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Tags the failure counter with the exception's simple name and increments it.
     */
    private void collectRequestExceptionMetrics(final Map<String, String> tags, final String exceptionName) {
        tags.put("exception", exceptionName);
        registry.counter(requestFailureCounterId.withTags(tags)).increment();
    }
}
| 2,104 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/MetacatErrorController.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api;
import org.springframework.boot.autoconfigure.web.servlet.error.AbstractErrorController;
import org.springframework.boot.web.error.ErrorAttributeOptions;
import org.springframework.boot.web.servlet.error.ErrorAttributes;
import org.springframework.boot.autoconfigure.web.ErrorProperties;
import org.springframework.boot.autoconfigure.web.servlet.error.ErrorViewResolver;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import javax.servlet.http.HttpServletRequest;
import java.util.Collections;
import java.util.Map;
/**
* Error controller.
*
* @author amajumdar
* @since 1.2.0
*/
@RequestMapping("${server.error.path:${error.path:/error}}")
public class MetacatErrorController extends AbstractErrorController {
private final ErrorProperties errorProperties;
/**
* Default constructor.
* @param errorAttributes error attributes
* @param errorProperties error properties
*/
public MetacatErrorController(final ErrorAttributes errorAttributes, final ErrorProperties errorProperties) {
super(errorAttributes, Collections.<ErrorViewResolver>emptyList());
this.errorProperties = errorProperties;
}
/**
* Mapping for error handling.
* @param request http request
* @return error response
*/
@RequestMapping
@ResponseBody
public ResponseEntity<Map<String, Object>> error(final HttpServletRequest request) {
final Map<String, Object> body = getErrorAttributes(request, getErrorAttributeOptions(request));
final HttpStatus status = getStatus(request);
return new ResponseEntity<>(body, status);
}
private ErrorAttributeOptions getErrorAttributeOptions(final HttpServletRequest request) {
ErrorAttributeOptions options = ErrorAttributeOptions.defaults();
if (includeStackTrace(request)) {
options = options.including(ErrorAttributeOptions.Include.STACK_TRACE);
}
if (includeMessage(request)) {
options = options.including(ErrorAttributeOptions.Include.MESSAGE);
}
return options;
}
@SuppressWarnings("deprecation")
private boolean includeStackTrace(final HttpServletRequest request) {
switch (this.errorProperties.getIncludeStacktrace()) {
case ALWAYS:
return true;
case ON_PARAM:
return getBooleanParameter(request, "trace");
default:
return false;
}
}
private boolean includeMessage(final HttpServletRequest request) {
switch (this.errorProperties.getIncludeMessage()) {
case ALWAYS:
return true;
case ON_PARAM:
return getBooleanParameter(request, "message");
default:
return false;
}
}
}
| 2,105 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/IndexController.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
/**
 * Default controller mapped at the service root ("/mds").
 */
@RestController
@RequestMapping("/mds")
public class IndexController {
    /**
     * Index API. Intentionally empty: a GET on the root simply returns
     * 200 OK with no body, serving as a liveness/landing endpoint.
     */
    @RequestMapping(method = RequestMethod.GET)
    @ResponseStatus(HttpStatus.OK)
    public void index() {
        // TODO: Great place for hypermedia
    }
}
| 2,106 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/ExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api;
import com.netflix.metacat.common.exception.MetacatAlreadyExistsException;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatException;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.exception.MetacatPreconditionFailedException;
import com.netflix.metacat.common.exception.MetacatUserMetadataException;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.exception.MetacatUnAuthorizedException;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
/**
* Exception mapper for converting exceptions in application to web status error messages.
*
* @author tgianos
* @since 1.1.0
*/
@Slf4j
@ControllerAdvice
public class ExceptionMapper {
/**
* Handle Metacat Exceptions.
*
* @param response The HTTP response
* @param e The exception to handle
* @throws IOException on error in sending error
*/
@ExceptionHandler(MetacatException.class)
public void handleMetacatException(
final HttpServletResponse response,
final MetacatException e
) throws IOException {
final int status;
boolean logErrorLevel = false;
if (e instanceof MetacatAlreadyExistsException) {
status = HttpStatus.CONFLICT.value();
} else if (e instanceof MetacatBadRequestException) {
status = HttpStatus.BAD_REQUEST.value();
} else if (e instanceof MetacatPreconditionFailedException) {
status = HttpStatus.PRECONDITION_FAILED.value();
} else if (e instanceof MetacatNotFoundException) {
status = HttpStatus.NOT_FOUND.value();
} else if (e instanceof MetacatNotSupportedException) {
logErrorLevel = true;
status = HttpStatus.NOT_IMPLEMENTED.value();
} else if (e instanceof MetacatUserMetadataException) {
// TODO: This makes no sense
status = HttpStatus.SEE_OTHER.value();
} else if (e instanceof MetacatTooManyRequestsException) {
status = HttpStatus.TOO_MANY_REQUESTS.value();
} else if (e instanceof MetacatUnAuthorizedException) {
status = HttpStatus.FORBIDDEN.value();
} else {
logErrorLevel = true;
status = HttpStatus.INTERNAL_SERVER_ERROR.value();
}
if (logErrorLevel) {
log.error(e.getLocalizedMessage(), e);
} else {
log.warn(e.getLocalizedMessage(), e);
}
response.sendError(status, e.getLocalizedMessage());
}
}
| 2,107 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/ApiFilter.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.api;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import lombok.extern.slf4j.Slf4j;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
/**
 * REST Interceptor. Captures request headers into a {@code MetacatRequestContext}
 * held in thread-local storage for the duration of the request.
 *
 * @author amajumdar
 * @author tgianos
 * @since 1.1.0
 */
@Slf4j
public class ApiFilter implements Filter {
    /**
     * {@inheritDoc}
     */
    @Override
    public void init(final FilterConfig filterConfig) throws ServletException {
        // No initialization required.
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void doFilter(final ServletRequest request,
                         final ServletResponse response,
                         final FilterChain chain) throws IOException, ServletException {
        preFilter(request, response, chain);
        try {
            chain.doFilter(request, response);
        } finally {
            // Always clear the thread-local context, even on failure.
            postFilter(request, response, chain);
        }
    }

    protected void preFilter(final ServletRequest request,
                             final ServletResponse response,
                             final FilterChain chain) throws ServletException {
        if (!(request instanceof HttpServletRequest)) {
            throw new ServletException("Expected an HttpServletRequest but didn't get one");
        }
        final HttpServletRequest httpRequest = (HttpServletRequest) request;
        // Fall back to a default user when the caller supplied no user header.
        final String headerUser = httpRequest.getHeader(MetacatRequestContext.HEADER_KEY_USER_NAME);
        final MetacatRequestContext context = buildRequestContext(
            headerUser == null ? "metacat" : headerUser,
            httpRequest.getHeader(MetacatRequestContext.HEADER_KEY_CLIENT_APP_NAME),
            httpRequest.getHeader("X-Forwarded-For"),
            httpRequest.getHeader(MetacatRequestContext.HEADER_KEY_JOB_ID),
            httpRequest.getHeader(MetacatRequestContext.HEADER_KEY_DATA_TYPE_CONTEXT),
            httpRequest.getScheme(),
            httpRequest.getRequestURI(),
            httpRequest
        );
        MetacatContextManager.setContext(context);
        log.info(context.toString());
    }

    protected MetacatRequestContext buildRequestContext(final String userName,
                                                        final String clientAppName,
                                                        final String clientId,
                                                        final String jobId,
                                                        final String dataTypeContext,
                                                        final String scheme,
                                                        final String requestUri,
                                                        final HttpServletRequest httpServletRequest) {
        return MetacatRequestContext.builder()
            .userName(userName)
            .clientAppName(clientAppName)
            .clientId(clientId)
            .jobId(jobId)
            .dataTypeContext(dataTypeContext)
            .scheme(scheme)
            .apiUri(requestUri)
            .build();
    }

    protected void postFilter(final ServletRequest request,
                              final ServletResponse response,
                              final FilterChain chain) throws ServletException {
        MetacatContextManager.removeContext();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void destroy() {
        // Nothing to release.
    }
}
| 2,108 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes REST API resources.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.api;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,109 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/PartitionController.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api.v1;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.api.v1.PartitionV1;
import com.netflix.metacat.main.api.RequestWrapper;
import com.netflix.metacat.main.services.MViewService;
import com.netflix.metacat.main.services.PartitionService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import io.swagger.annotations.ApiResponse;
import io.swagger.annotations.ApiResponses;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Nullable;
import java.net.HttpURLConnection;
import java.util.Collections;
import java.util.List;
/**
* Partition V1 API implementation.
*
* @author amajumdar
* @author zhenl
*/
@RestController
@RequestMapping(
path = "/mds/v1/partition",
produces = MediaType.APPLICATION_JSON_VALUE
)
@Api(value = "PartitionV1",
description = "Federated partition metadata operations",
produces = MediaType.APPLICATION_JSON_VALUE,
consumes = MediaType.APPLICATION_JSON_VALUE
)
@DependsOn("metacatCoreInitService")
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class PartitionController implements PartitionV1 {
private final MetacatController v1;
private final MViewService mViewService;
private final PartitionService partitionService;
private final RequestWrapper requestWrapper;
/**
 * Delete named partitions from a table.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param partitionIds list of partition names to delete
 */
@RequestMapping(
    method = RequestMethod.DELETE,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}",
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
    value = "Delete named partitions from a table",
    notes = "List of partitions names of the given table name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions were deleted successfully"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or table cannot be located"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_BAD_REQUEST,
            message = "The list of partitionNames is not present"
        )
    }
)
@Override
public void deletePartitions(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "partitionId of the partitions to be deleted from this table", required = true)
    @RequestBody final List<String> partitionIds
) {
    // Qualify (and thereby validate) the table name before doing any work.
    final QualifiedName name = this.requestWrapper.qualifyName(
        () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
    );
    this.requestWrapper.processRequest(
        name,
        "deleteTablePartition",
        () -> {
            // Reject an empty id list early rather than issuing a no-op delete.
            if (partitionIds.isEmpty()) {
                throw new IllegalArgumentException("partitionIds are required");
            }
            this.partitionService.delete(name, partitionIds);
            return null;
        }
    );
}
/**
 * Delete partitions for the given view.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param viewName     metacat view name
 * @param partitionIds list of partition names to delete
 */
@RequestMapping(
    method = RequestMethod.DELETE,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}",
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
    value = "Delete partitions for the given view",
    notes = "Delete partitions for the given view"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions were deleted successfully"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or metacat view cannot be located"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_BAD_REQUEST,
            message = "The list of partitionNames is not present"
        )
    }
)
public void deletePartitions(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "The name of the metacat view", required = true)
    @PathVariable("view-name") final String viewName,
    @ApiParam(value = "partitionId of the partitions to be deleted from this table", required = true)
    @RequestBody final List<String> partitionIds
) {
    // Qualify (and thereby validate) the view name before doing any work.
    final QualifiedName name = this.requestWrapper.qualifyName(
        () -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
    );
    this.requestWrapper.processRequest(
        name,
        "deleteMViewPartition",
        () -> {
            // Reject an empty id list early rather than issuing a no-op delete.
            if (partitionIds.isEmpty()) {
                throw new IllegalArgumentException("partitionIds are required");
            }
            this.mViewService.deletePartitions(name, partitionIds);
            return null;
        }
    );
}
/**
 * Return list of partitions for a table.
 *
 * @param catalogName         catalog name
 * @param databaseName        database name
 * @param tableName           table name
 * @param filter              filter expression
 * @param sortBy              sort by this name
 * @param sortOrder           sort order to use
 * @param offset              offset of the list
 * @param limit               size of the list
 * @param includeUserMetadata whether to include user metadata for every partition in the list
 * @return list of partitions for a table
 */
@RequestMapping(
    method = RequestMethod.GET,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partitions for a table",
    notes = "List of partitions for the given table name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions were retrieved"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or table cannot be located"
        )
    }
)
@Override
public List<PartitionDto> getPartitions(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "Filter expression string to use")
    @Nullable @RequestParam(name = "filter", required = false) final String filter,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
    @ApiParam(value = "Whether to include user metadata information to the response")
    @RequestParam(name = "includeUserMetadata", defaultValue = "false") final boolean includeUserMetadata
) {
    final QualifiedName name = this.requestWrapper.qualifyName(
        () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
    );
    return this.requestWrapper.processRequest(
        name,
        "getPartitions",
        // "filterPassed" is a metrics/logging tag recording whether a filter was supplied.
        Collections.singletonMap("filterPassed", StringUtils.isEmpty(filter) ? "false" : "true"),
        // NOTE(review): includeUserMetadata is passed for two consecutive boolean
        // parameters of partitionService.list - confirm the second flag
        // (presumably "include partition details/definition metadata") is
        // intentionally tied to the same request parameter.
        () -> this.partitionService.list(
            name,
            new Sort(sortBy, sortOrder),
            new Pageable(limit, offset),
            includeUserMetadata,
            includeUserMetadata,
            new GetPartitionsRequestDto(filter, null, false, false)
        )
    );
}
/**
 * Return list of partitions for a metacat view.
 *
 * @param catalogName         catalog name
 * @param databaseName        database name
 * @param tableName           table name
 * @param viewName            view name
 * @param filter              filter expression
 * @param sortBy              sort by this name
 * @param sortOrder           sort order to use
 * @param offset              offset of the list
 * @param limit               size of the list
 * @param includeUserMetadata whether to include user metadata for every partition in the list
 * @return list of partitions for a metacat view
 */
@RequestMapping(
    method = RequestMethod.GET,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partitions for a metacat view",
    notes = "List of partitions for the given view name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions were retrieved"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or metacat view cannot be located"
        )
    }
)
public List<PartitionDto> getPartitions(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "The name of the metacat view", required = true)
    @PathVariable("view-name") final String viewName,
    @ApiParam(value = "Filter expression string to use")
    @Nullable @RequestParam(name = "filter", required = false) final String filter,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
    @ApiParam(value = "Whether to include user metadata information to the response")
    @RequestParam(name = "includeUserMetadata", defaultValue = "false") final boolean includeUserMetadata
) {
    final QualifiedName name = this.requestWrapper.qualifyName(
        () -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
    );
    return this.requestWrapper.processRequest(
        name,
        "getPartitions",
        // "filterPassed" is a metrics/logging tag recording whether a filter was supplied.
        Collections.singletonMap("filterPassed", StringUtils.isEmpty(filter) ? "false" : "true"),
        () -> this.mViewService.listPartitions(
            name,
            new Sort(sortBy, sortOrder),
            new Pageable(limit, offset),
            includeUserMetadata,
            new GetPartitionsRequestDto(filter, null, false, true)
        )
    );
}
/**
 * Return list of partitions for a table. POST variant of the partition list
 * endpoint that accepts the filter expression in the request body.
 *
 * @param catalogName             catalog name
 * @param databaseName            database name
 * @param tableName               table name
 * @param sortBy                  sort by this name
 * @param sortOrder               sort order to use
 * @param offset                  offset of the list
 * @param limit                   size of the list
 * @param includeUserMetadata     whether to include user metadata for every partition in the list
 * @param getPartitionsRequestDto request
 * @return list of partitions for a table
 */
@RequestMapping(
    method = RequestMethod.POST,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/request",
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partitions for a table",
    notes = "List of partitions for the given table name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions were retrieved"
        ),
        @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or table cannot be located"
        )
    }
)
@Override
public List<PartitionDto> getPartitionsForRequest(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
    @ApiParam(value = "Whether to include user metadata information to the response")
    @RequestParam(name = "includeUserMetadata", defaultValue = "false") final boolean includeUserMetadata,
    @ApiParam(value = "Request containing the filter expression for the partitions")
    @Nullable @RequestBody(required = false) final GetPartitionsRequestDto getPartitionsRequestDto
) {
    // Delegates to a getPartitions overload (taking the request DTO) defined
    // elsewhere in this class.
    return this.getPartitions(
        catalogName,
        databaseName,
        tableName,
        sortBy,
        sortOrder,
        offset,
        limit,
        includeUserMetadata,
        getPartitionsRequestDto
    );
}
/**
 * Return list of partitions for a view. POST variant of the view partition
 * list endpoint that accepts the filter expression in the request body.
 *
 * @param catalogName             catalog name
 * @param databaseName            database name
 * @param tableName               table name
 * @param viewName                view name
 * @param sortBy                  sort by this name
 * @param sortOrder               sort order to use
 * @param offset                  offset of the list
 * @param limit                   size of the list
 * @param includeUserMetadata     whether to include user metadata for every partition in the list
 * @param getPartitionsRequestDto request
 * @return list of partitions for a view
 */
@RequestMapping(
    method = RequestMethod.POST,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/request",
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partitions for a metacat view",
    notes = "List of partitions for the given view name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions were retrieved"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or metacat view cannot be located"
        )
    }
)
public List<PartitionDto> getPartitionsForRequest(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "The name of the metacat view", required = true)
    @PathVariable("view-name") final String viewName,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
    @ApiParam(value = "Whether to include user metadata information to the response")
    @RequestParam(name = "includeUserMetadata", defaultValue = "false") final boolean includeUserMetadata,
    @ApiParam(value = "Request containing the filter expression for the partitions")
    @Nullable @RequestBody(required = false) final GetPartitionsRequestDto getPartitionsRequestDto
) {
    // Delegates to a view-scoped getPartitions overload (taking the request
    // DTO) defined elsewhere in this class.
    return this.getPartitions(
        catalogName,
        databaseName,
        tableName,
        viewName,
        sortBy,
        sortOrder,
        offset,
        limit,
        includeUserMetadata,
        getPartitionsRequestDto
    );
}
/**
 * Return list of partition names for a table.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param filter       filter expression
 * @param sortBy       sort by this name
 * @param sortOrder    sort order to use
 * @param offset       offset of the list
 * @param limit        size of the list
 * @return list of partition names for a table
 */
@RequestMapping(
    method = RequestMethod.GET,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partition keys for a table",
    notes = "List of partition keys for the given table name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions keys were retrieved"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or table cannot be located"
        )
    }
)
@Override
public List<String> getPartitionKeys(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "Filter expression string to use")
    @Nullable @RequestParam(name = "filter", required = false) final String filter,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit
) {
    // Wraps the query-parameter filter into a request DTO and delegates to the
    // private helper defined elsewhere in this class.
    return this._getPartitionKeys(
        catalogName,
        databaseName,
        tableName,
        sortBy,
        sortOrder,
        offset,
        limit,
        new GetPartitionsRequestDto(filter, null, false, false)
    );
}
/**
 * Return list of partition names for a view.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param viewName     view name
 * @param filter       filter expression
 * @param sortBy       sort by this name
 * @param sortOrder    sort order to use
 * @param offset       offset of the list
 * @param limit        size of the list
 * @return list of partition names for a view
 */
@RequestMapping(
    method = RequestMethod.GET,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partition keys for a metacat view",
    notes = "List of partition keys for the given view name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions keys were retrieved"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or metacat view cannot be located"
        )
    }
)
public List<String> getPartitionKeys(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "The name of the metacat view", required = true)
    @PathVariable("view-name") final String viewName,
    @ApiParam(value = "Filter expression string to use")
    @Nullable @RequestParam(name = "filter", required = false) final String filter,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit
) {
    // Wraps the query-parameter filter into a request DTO and delegates to the
    // private mview helper defined elsewhere in this class.
    return this._getMViewPartitionKeys(
        catalogName,
        databaseName,
        tableName,
        viewName,
        sortBy,
        sortOrder,
        offset,
        limit,
        new GetPartitionsRequestDto(filter, null, false, true)
    );
}
/**
 * Return list of partition names for a table. POST variant of the keys
 * endpoint that accepts the filter expression in the request body.
 *
 * @param catalogName             catalog name
 * @param databaseName            database name
 * @param tableName               table name
 * @param sortBy                  sort by this name
 * @param sortOrder               sort order to use
 * @param offset                  offset of the list
 * @param limit                   size of the list
 * @param getPartitionsRequestDto request
 * @return list of partition names for a table
 */
@RequestMapping(
    method = RequestMethod.POST,
    path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys-request",
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
    value = "List of partition keys for a table",
    notes = "List of partition keys for the given table name under the given catalog and database"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_OK,
            message = "The partitions keys were retrieved"
        ),
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_FOUND,
            message = "The requested catalog or database or table cannot be located"
        )
    }
)
public List<String> getPartitionKeysForRequest(
    @ApiParam(value = "The name of the catalog", required = true)
    @PathVariable("catalog-name") final String catalogName,
    @ApiParam(value = "The name of the database", required = true)
    @PathVariable("database-name") final String databaseName,
    @ApiParam(value = "The name of the table", required = true)
    @PathVariable("table-name") final String tableName,
    @ApiParam(value = "Sort the partition list by this value")
    @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
    @ApiParam(value = "Sorting order to use")
    @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
    @ApiParam(value = "Offset of the list returned")
    @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
    @ApiParam(value = "Size of the partition list")
    @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
    @ApiParam(value = "Request containing the filter expression for the partitions")
    @Nullable @RequestBody(required = false) final GetPartitionsRequestDto getPartitionsRequestDto
) {
    // NOTE(review): the DTO may be null (required = false); presumably the
    // private helper tolerates a null request - confirm.
    return this._getPartitionKeys(
        catalogName,
        databaseName,
        tableName,
        sortBy,
        sortOrder,
        offset,
        limit,
        getPartitionsRequestDto
    );
}
    /**
     * Return list of partition names for a metacat view.
     * <p>
     * POST variant of the mview partition-keys endpoint: the filter expression travels in the
     * request body rather than as a query parameter.
     *
     * @param catalogName             catalog name
     * @param databaseName            database name
     * @param tableName               table name
     * @param viewName                view name
     * @param sortBy                  sort by this name
     * @param sortOrder               sort order to use
     * @param offset                  offset of the list
     * @param limit                   size of the list
     * @param getPartitionsRequestDto request containing the filter expression; may be null
     * @return list of partition names for a view
     */
    @RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys-request",
        consumes = MediaType.APPLICATION_JSON_VALUE
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        value = "List of partition keys for a metacat view",
        notes = "List of partition keys for the given view name under the given catalog and database"
    )
    @ApiResponses(
        {
            @ApiResponse(
                code = HttpURLConnection.HTTP_OK,
                message = "The partitions keys were retrieved"
            ),
            @ApiResponse(
                code = HttpURLConnection.HTTP_NOT_FOUND,
                message = "The requested catalog or database or metacat view cannot be located"
            )
        }
    )
    public List<String> getPartitionKeysForRequest(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "The name of the metacat view", required = true)
        @PathVariable("view-name") final String viewName,
        @ApiParam(value = "Sort the partition list by this value")
        @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
        @ApiParam(value = "Sorting order to use")
        @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
        @ApiParam(value = "Offset of the list returned")
        @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
        @ApiParam(value = "Size of the partition list")
        @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
        @ApiParam(value = "Request containing the filter expression for the partitions")
        @Nullable @RequestBody(required = false) final GetPartitionsRequestDto getPartitionsRequestDto
    ) {
        // Delegate to the shared mview partition-key implementation.
        return this._getMViewPartitionKeys(
            catalogName,
            databaseName,
            tableName,
            viewName,
            sortBy,
            sortOrder,
            offset,
            limit,
            getPartitionsRequestDto
        );
    }
    /**
     * Return list of partition uris for a table.
     * <p>
     * GET variant: the filter expression is passed as a query parameter and wrapped into a
     * {@link GetPartitionsRequestDto} before delegating.
     *
     * @param catalogName  catalog name
     * @param databaseName database name
     * @param tableName    table name
     * @param filter       filter expression
     * @param sortBy       sort by this name
     * @param sortOrder    sort order to use
     * @param offset       offset of the list
     * @param limit        size of the list
     * @return list of partition uris for a table
     */
    @RequestMapping(
        method = RequestMethod.GET,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris"
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        value = "List of partition uris for a table",
        notes = "List of partition uris for the given table name under the given catalog and database"
    )
    @ApiResponses(
        {
            @ApiResponse(
                code = HttpURLConnection.HTTP_OK,
                message = "The partitions uris were retrieved"
            ),
            @ApiResponse(
                code = HttpURLConnection.HTTP_NOT_FOUND,
                message = "The requested catalog or database or table cannot be located"
            )
        }
    )
    public List<String> getPartitionUris(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "Filter expression string to use")
        @Nullable @RequestParam(name = "filter", required = false) final String filter,
        @ApiParam(value = "Sort the partition list by this value")
        @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
        @ApiParam(value = "Sorting order to use")
        @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
        @ApiParam(value = "Offset of the list returned")
        @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
        @ApiParam(value = "Size of the partition list")
        @Nullable @RequestParam(name = "limit", required = false) final Integer limit
    ) {
        return this._getPartitionUris(
            catalogName,
            databaseName,
            tableName,
            sortBy,
            sortOrder,
            offset,
            limit,
            new GetPartitionsRequestDto(filter, null, false, false)
        );
    }
    /**
     * Return list of partition uris for a metacat view.
     * <p>
     * GET variant: the filter expression is passed as a query parameter and wrapped into a
     * {@link GetPartitionsRequestDto} before delegating.
     *
     * @param catalogName  catalog name
     * @param databaseName database name
     * @param tableName    table name
     * @param viewName     view name
     * @param filter       filter expression
     * @param sortBy       sort by this name
     * @param sortOrder    sort order to use
     * @param offset       offset of the list
     * @param limit        size of the list
     * @return list of partition uris for a metacat view
     */
    @RequestMapping(
        method = RequestMethod.GET,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris"
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        value = "List of partition uris for a metacat view",
        notes = "List of partition uris for the given view name under the given catalog and database"
    )
    @ApiResponses(
        {
            @ApiResponse(
                code = HttpURLConnection.HTTP_OK,
                message = "The partitions uris were retrieved"
            ),
            @ApiResponse(
                code = HttpURLConnection.HTTP_NOT_FOUND,
                message = "The requested catalog or database or metacat view cannot be located"
            )
        }
    )
    public List<String> getPartitionUris(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "The name of the metacat view", required = true)
        @PathVariable("view-name") final String viewName,
        @ApiParam(value = "Filter expression string to use")
        @Nullable @RequestParam(name = "filter", required = false) final String filter,
        @ApiParam(value = "Sort the partition list by this value")
        @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
        @ApiParam(value = "Sorting order to use")
        @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
        @ApiParam(value = "Offset of the list returned")
        @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
        @ApiParam(value = "Size of the partition list")
        @Nullable @RequestParam(name = "limit", required = false) final Integer limit
    ) {
        return this._getMViewPartitionUris(
            catalogName,
            databaseName,
            tableName,
            viewName,
            sortBy,
            sortOrder,
            offset,
            limit,
            new GetPartitionsRequestDto(filter, null, false, true)
        );
    }
    /**
     * Return list of partition uris for a table.
     * <p>
     * POST variant: the filter expression travels in the request body rather than as a query
     * parameter.
     *
     * @param catalogName             catalog name
     * @param databaseName            database name
     * @param tableName               table name
     * @param sortBy                  sort by this name
     * @param sortOrder               sort order to use
     * @param offset                  offset of the list
     * @param limit                   size of the list
     * @param getPartitionsRequestDto request containing the filter expression; may be null
     * @return list of partition uris for a table
     */
    @RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris-request",
        consumes = MediaType.APPLICATION_JSON_VALUE
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        value = "List of partition uris for a table",
        notes = "List of partition uris for the given table name under the given catalog and database"
    )
    @ApiResponses(
        {
            @ApiResponse(
                code = HttpURLConnection.HTTP_OK,
                message = "The partitions uris were retrieved"
            ),
            @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND,
                message = "The requested catalog or database or table cannot be located"
            )
        }
    )
    public List<String> getPartitionUrisForRequest(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "Sort the partition list by this value")
        @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
        @ApiParam(value = "Sorting order to use")
        @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
        @ApiParam(value = "Offset of the list returned")
        @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
        @ApiParam(value = "Size of the partition list")
        @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
        @ApiParam(value = "Request containing the filter expression for the partitions")
        @Nullable @RequestBody(required = false) final GetPartitionsRequestDto getPartitionsRequestDto
    ) {
        // Delegate to the shared table partition-uri implementation.
        return this._getPartitionUris(
            catalogName,
            databaseName,
            tableName,
            sortBy,
            sortOrder,
            offset,
            limit,
            getPartitionsRequestDto
        );
    }
    /**
     * Return list of partition uris for a metacat view.
     * <p>
     * POST variant: the filter expression travels in the request body rather than as a query
     * parameter.
     *
     * @param catalogName             catalog name
     * @param databaseName            database name
     * @param tableName               table name
     * @param viewName                view name
     * @param sortBy                  sort by this name
     * @param sortOrder               sort order to use
     * @param offset                  offset of the list
     * @param limit                   size of the list
     * @param getPartitionsRequestDto request containing the filter expression; may be null
     * @return list of partition uris for a view
     */
    @RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris-request",
        consumes = MediaType.APPLICATION_JSON_VALUE
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        value = "List of partition uris for a metacat view",
        notes = "List of partition uris for the given view name under the given catalog and database"
    )
    @ApiResponses(
        {
            @ApiResponse(
                code = HttpURLConnection.HTTP_OK,
                message = "The partitions uris were retrieved"
            ),
            @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND,
                message = "The requested catalog or database or metacat view cannot be located"
            )
        }
    )
    public List<String> getPartitionUrisForRequest(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "The name of the metacat view", required = true)
        @PathVariable("view-name") final String viewName,
        @ApiParam(value = "Sort the partition list by this value")
        @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
        @ApiParam(value = "Sorting order to use")
        @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
        @ApiParam(value = "Offset of the list returned")
        @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
        @ApiParam(value = "Size of the partition list")
        @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
        @ApiParam(value = "Request containing the filter expression for the partitions")
        @Nullable @RequestBody(required = false) final GetPartitionsRequestDto getPartitionsRequestDto
    ) {
        // Delegate to the shared mview partition-uri implementation.
        return this._getMViewPartitionUris(
            catalogName,
            databaseName,
            tableName,
            viewName,
            sortBy,
            sortOrder,
            offset,
            limit,
            getPartitionsRequestDto
        );
    }
/**
* Add/update partitions to the given table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param partitionsSaveRequestDto partition request containing the list of partitions to be added/updated
* @return Response with the number of partitions added/updated
*/
@RequestMapping(
method = RequestMethod.POST,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}",
consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
position = 5,
value = "Add/update partitions to the given table",
notes = "Add/update partitions to the given table"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_CREATED,
message = "The partitions were successfully saved"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
@Override
public PartitionsSaveResponseDto savePartitions(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "Request containing the list of partitions", required = true)
@RequestBody final PartitionsSaveRequestDto partitionsSaveRequestDto
) {
final QualifiedName name = QualifiedName.ofTable(catalogName, databaseName, tableName);
return this.requestWrapper.processRequest(
name,
"saveTablePartition",
() -> {
final PartitionsSaveResponseDto result;
if (partitionsSaveRequestDto.getPartitions() == null
|| partitionsSaveRequestDto.getPartitions().isEmpty()) {
result = new PartitionsSaveResponseDto();
} else {
result = this.partitionService.save(name, partitionsSaveRequestDto);
// This metadata is actually for the table, if it is present update that
if (partitionsSaveRequestDto.getDefinitionMetadata() != null
|| partitionsSaveRequestDto.getDataMetadata() != null) {
final TableDto dto = new TableDto();
dto.setName(name);
dto.setDefinitionMetadata(partitionsSaveRequestDto.getDefinitionMetadata());
dto.setDataMetadata(partitionsSaveRequestDto.getDataMetadata());
this.v1.updateTable(catalogName, databaseName, tableName, dto);
}
}
return result;
}
);
}
/**
* Add/update partitions to the given metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param partitionsSaveRequestDto partition request containing the list of partitions to be added/updated
* @return Response with the number of partitions added/updated
*/
@RequestMapping(
method = RequestMethod.POST,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}",
consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
position = 5,
value = "Add/update partitions to the given table",
notes = "Add/update partitions to the given table"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_CREATED,
message = "The partitions were successfully saved"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
public PartitionsSaveResponseDto savePartitions(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "The name of the view", required = true)
@PathVariable("view-name") final String viewName,
@ApiParam(value = "Request containing the list of partitions", required = true)
@RequestBody final PartitionsSaveRequestDto partitionsSaveRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"saveMViewPartition",
() -> {
final PartitionsSaveResponseDto result;
if (partitionsSaveRequestDto.getPartitions() == null
|| partitionsSaveRequestDto.getPartitions().isEmpty()) {
result = new PartitionsSaveResponseDto();
} else {
result = mViewService.savePartitions(name, partitionsSaveRequestDto, true);
// This metadata is actually for the view, if it is present update that
if (partitionsSaveRequestDto.getDefinitionMetadata() != null
|| partitionsSaveRequestDto.getDataMetadata() != null) {
final TableDto dto = new TableDto();
dto.setName(name);
dto.setDefinitionMetadata(partitionsSaveRequestDto.getDefinitionMetadata());
dto.setDataMetadata(partitionsSaveRequestDto.getDataMetadata());
this.v1.updateMView(catalogName, databaseName, tableName, viewName, dto);
}
}
return result;
}
);
}
    /**
     * Get the partition count for the given table.
     *
     * @param catalogName  catalog name
     * @param databaseName database name
     * @param tableName    table name
     * @return partition count for the given table
     */
    @RequestMapping(
        method = RequestMethod.GET,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/count"
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        position = 5,
        value = "Partition count for the given table",
        notes = "Partition count for the given table"
    )
    @ApiResponses(
        {
            @ApiResponse(
                code = HttpURLConnection.HTTP_OK,
                message = "The partition count was returned successfully"
            ),
            @ApiResponse(
                code = HttpURLConnection.HTTP_NOT_FOUND,
                message = "The requested catalog or database or table cannot be located"
            )
        }
    )
    public Integer getPartitionCount(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName
    ) {
        // Validate/qualify the name first, then count inside the instrumented request wrapper.
        final QualifiedName name = this.requestWrapper.qualifyName(
            () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
        );
        return this.requestWrapper.processRequest(
            name,
            "getPartitionCount",
            () -> this.partitionService.count(name)
        );
    }
/**
* Get the partition count for the given metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return partition count for the given view
*/
@RequestMapping(
method = RequestMethod.GET,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/count"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
position = 5,
value = "Partition count for the given table",
notes = "Partition count for the given table"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "The partition count was returned successfully"
),
@ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
public Integer getPartitionCount(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "The name of the view", required = true)
@PathVariable("view-name") final String viewName
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"getPartitionCount",
() -> this.mViewService.partitionCount(name)
);
}
private List<PartitionDto> getPartitions(
final String catalogName,
final String databaseName,
final String tableName,
@Nullable final String sortBy,
@Nullable final SortOrder sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit,
final boolean includeUserMetadata,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, tableName)
);
return this.requestWrapper.processRequest(
name,
"getPartitions",
Collections.singletonMap("filterPassed",
getPartitionsRequestDto == null || StringUtils.isEmpty(
getPartitionsRequestDto.getFilter()) ? "false" : "true"),
() -> partitionService.list(
name,
new Sort(sortBy, sortOrder),
new Pageable(limit, offset),
includeUserMetadata,
includeUserMetadata,
getPartitionsRequestDto
)
);
}
private List<PartitionDto> getPartitions(
final String catalogName,
final String databaseName,
final String tableName,
final String viewName,
@Nullable final String sortBy,
@Nullable final SortOrder sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit,
final boolean includeUserMetadata,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"getPartitions",
Collections.singletonMap("filterPassed",
getPartitionsRequestDto == null || StringUtils.isEmpty(
getPartitionsRequestDto.getFilter()) ? "false" : "true"),
() -> this.mViewService.listPartitions(
name,
new Sort(sortBy, sortOrder),
new Pageable(limit, offset),
includeUserMetadata,
getPartitionsRequestDto
)
);
}
@SuppressWarnings("checkstyle:methodname")
private List<String> _getPartitionUris(
final String catalogName,
final String databaseName,
final String tableName,
@Nullable final String sortBy,
@Nullable final SortOrder sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, tableName)
);
return this.requestWrapper.processRequest(
name,
"getPartitionUris",
Collections.singletonMap("filterPassed",
getPartitionsRequestDto == null || StringUtils.isEmpty(
getPartitionsRequestDto.getFilter()) ? "false" : "true"),
() -> this.partitionService.getPartitionUris(
name,
new Sort(sortBy, sortOrder),
new Pageable(limit, offset),
getPartitionsRequestDto
)
);
}
@SuppressWarnings("checkstyle:methodname")
private List<String> _getMViewPartitionKeys(
final String catalogName,
final String databaseName,
final String tableName,
final String viewName,
@Nullable final String sortBy,
@Nullable final SortOrder sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"getMViewPartitionKeys",
Collections.singletonMap("filterPassed",
getPartitionsRequestDto == null || StringUtils.isEmpty(
getPartitionsRequestDto.getFilter()) ? "false" : "true"),
() -> this.mViewService.getPartitionKeys(
name,
new Sort(sortBy, sortOrder),
new Pageable(limit, offset),
getPartitionsRequestDto
)
);
}
@SuppressWarnings("checkstyle:methodname")
private List<String> _getMViewPartitionUris(
final String catalogName,
final String databaseName,
final String tableName,
final String viewName,
@Nullable final String sortBy,
@Nullable final SortOrder sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"getMViewPartitionUris",
Collections.singletonMap("filterPassed",
getPartitionsRequestDto == null || StringUtils.isEmpty(
getPartitionsRequestDto.getFilter()) ? "false" : "true"),
() -> this.mViewService.getPartitionUris(
name,
new Sort(sortBy, sortOrder),
new Pageable(limit, offset),
getPartitionsRequestDto
)
);
}
@SuppressWarnings("checkstyle:methodname")
private List<String> _getPartitionKeys(
final String catalogName,
final String databaseName,
final String tableName,
@Nullable final String sortBy,
@Nullable final SortOrder sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, tableName)
);
return this.requestWrapper.processRequest(
name,
"getPartitionKeys",
Collections.singletonMap("filterPassed",
getPartitionsRequestDto == null || StringUtils.isEmpty(
getPartitionsRequestDto.getFilter()) ? "false" : "true"),
() -> partitionService.getPartitionKeys(
name,
new Sort(sortBy, sortOrder),
new Pageable(limit, offset),
getPartitionsRequestDto
)
);
}
}
| 2,110 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/TagController.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.api.v1;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.TagCreateRequestDto;
import com.netflix.metacat.common.dto.TagRemoveRequestDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.api.RequestWrapper;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetCatalogServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.MViewService;
import com.netflix.metacat.main.services.TableService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import io.swagger.annotations.ApiResponse;
import io.swagger.annotations.ApiResponses;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.WebDataBinder;
import org.springframework.web.bind.annotation.InitBinder;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Nullable;
import java.beans.PropertyEditorSupport;
import java.net.HttpURLConnection;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
/**
* Tag API implementation.
*
* @author amajumdar
*/
@RestController
@RequestMapping(
path = "/mds/v1/tag",
produces = MediaType.APPLICATION_JSON_VALUE
)
@Api(
value = "TagV1",
description = "Federated metadata tag operations",
produces = MediaType.APPLICATION_JSON_VALUE,
consumes = MediaType.APPLICATION_JSON_VALUE
)
@DependsOn("metacatCoreInitService")
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class TagController {
private final RequestWrapper requestWrapper;
private final TagService tagService;
private final MetacatEventBus eventBus;
private final TableService tableService;
private final DatabaseService databaseService;
private final CatalogService catalogService;
private final MViewService mViewService;
/**
* Return the list of tags.
*
* @return list of tags
*/
@RequestMapping(method = RequestMethod.GET, path = "/tags")
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
position = 1,
value = "Returns the tags",
notes = "Returns the tags"
)
public Set<String> getTags() {
return this.requestWrapper.processRequest(
"TagV1Resource.getTags",
this.tagService::getTags
);
}
    /**
     * Returns the list of qualified names for the given input.
     * <p>
     * Names tagged with any of {@code includeTags} are candidates; a candidate is dropped again
     * if its tags match {@code excludeTags}. The name-prefix parameters and {@code type} further
     * narrow the result.
     *
     * @param includeTags  Set of matching tags
     * @param excludeTags  Set of un-matching tags
     * @param sourceName   Prefix of the source name
     * @param databaseName Prefix of the database name
     * @param tableName    Prefix of the table name
     * @param type         metacat qualified name type
     * @return list of qualified names
     */
    @RequestMapping(
        method = RequestMethod.GET,
        path = "/list"
    )
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        position = 1,
        value = "Returns the list of qualified names that are tagged with the given tags."
            + " Qualified names will be excluded if the contained tags matches the excluded tags",
        notes = "Returns the list of qualified names that are tagged with the given tags."
            + " Qualified names will be excluded if the contained tags matches the excluded tags"
    )
    public List<QualifiedName> list(
        @ApiParam(value = "Set of matching tags")
        @Nullable @RequestParam(name = "include", required = false) final Set<String> includeTags,
        @ApiParam(value = "Set of un-matching tags")
        @Nullable @RequestParam(name = "exclude", required = false) final Set<String> excludeTags,
        @ApiParam(value = "Prefix of the source name")
        @Nullable @RequestParam(name = "sourceName", required = false) final String sourceName,
        @ApiParam(value = "Prefix of the database name")
        @Nullable @RequestParam(name = "databaseName", required = false) final String databaseName,
        @ApiParam(value = "Prefix of the table name")
        @Nullable @RequestParam(name = "tableName", required = false) final String tableName,
        @ApiParam(value = "Qualified name type")
        @Nullable
        @RequestParam(name = "type", required = false) final QualifiedName.Type type
    ) {
        // Delegate the filtering entirely to the tag service.
        return this.requestWrapper.processRequest(
            "TagV1Resource.list",
            () -> this.tagService.list(
                includeTags,
                excludeTags,
                sourceName,
                databaseName,
                tableName,
                type)
        );
    }
/**
 * Finds the qualified names whose tags contain the given partial tag text.
 *
 * @param tag          partial tag text to match against
 * @param sourceName   prefix of the source name
 * @param databaseName prefix of the database name
 * @param tableName    prefix of the table name
 * @return the matching qualified names
 */
@RequestMapping(method = RequestMethod.GET, path = "/search")
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
        position = 1,
        value = "Returns the list of qualified names that are tagged with tags containing the given tagText",
        notes = "Returns the list of qualified names that are tagged with tags containing the given tagText"
)
public List<QualifiedName> search(
        @ApiParam(value = "Tag partial text")
        @Nullable @RequestParam(name = "tag", required = false) final String tag,
        @ApiParam(value = "Prefix of the source name")
        @Nullable @RequestParam(name = "sourceName", required = false) final String sourceName,
        @ApiParam(value = "Prefix of the database name")
        @Nullable @RequestParam(name = "databaseName", required = false) final String databaseName,
        @ApiParam(value = "Prefix of the table name")
        @Nullable @RequestParam(name = "tableName", required = false) final String tableName
) {
        // Straight pass-through; the tag service implements the partial-text match.
        return this.requestWrapper.processRequest(
                "TagV1Resource.search",
                () -> this.tagService.search(tag, sourceName, databaseName, tableName)
        );
}
/**
 * Sets the tags on the resource named in the request body.
 *
 * @param tagCreateRequestDto request carrying the qualified name and the tags to set
 * @return the tags now present on the resource
 */
@RequestMapping(
        method = RequestMethod.POST,
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
        value = "Sets the tags on the given resource",
        notes = "Sets the tags on the given resource"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_CREATED,
                    message = "The tags were successfully created"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
public Set<String> setTags(
        @ApiParam(value = "Request containing the set of tags and qualifiedName", required = true)
        @RequestBody final TagCreateRequestDto tagCreateRequestDto
) {
        return this.requestWrapper.processRequest(
                tagCreateRequestDto.getName(),
                "TagV1Resource.setTags",
                () -> {
                        // Resource-type-specific handling lives in setResourceTags.
                        return setResourceTags(tagCreateRequestDto);
                }
        );
}
/**
 * Sets the tags on the resource named in the request, dispatching on the qualified-name type.
 *
 * <p>For databases, tables and mviews a corresponding post-update event is published so
 * downstream listeners can pick up the change; catalog tagging publishes no event. For an
 * MVIEW that disappears between the existence check and the snapshot read, an empty set is
 * returned and no tags are set.
 *
 * @param tagCreateRequestDto request carrying the qualified name and the tags to set
 * @return the tags now present on the resource
 * @throws DatabaseNotFoundException if a database resource does not exist
 * @throws TableNotFoundException    if a table resource does not exist
 * @throws MetacatNotFoundException  if an mview does not exist or the name type is unsupported
 */
private Set<String> setResourceTags(@NonNull final TagCreateRequestDto tagCreateRequestDto) {
        final QualifiedName name = tagCreateRequestDto.getName();
        final Set<String> tags = new HashSet<>(tagCreateRequestDto.getTags());
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        Set<String> result = new HashSet<>();
        switch (name.getType()) {
            case CATALOG:
                // catalog service will throw exception if not found
                this.catalogService.get(name, GetCatalogServiceParameters.builder()
                    .includeDatabaseNames(false).includeUserMetadata(false).build());
                return this.tagService.setTags(name, tags, true);
            case DATABASE:
                if (!this.databaseService.exists(name)) {
                    throw new DatabaseNotFoundException(name);
                }
                result = this.tagService.setTags(name, tags, true);
                this.eventBus.post(
                    new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this)
                );
                return result;
            case TABLE:
                if (!this.tableService.exists(name)) {
                    throw new TableNotFoundException(name);
                }
                // Snapshot the table before and after so the update event carries both states.
                final TableDto oldTable = this.tableService
                    .get(name, GetTableServiceParameters.builder()
                        .includeInfo(true)
                        .includeDataMetadata(true)
                        .includeDefinitionMetadata(true)
                        .disableOnReadMetadataIntercetor(false)
                        .build())
                    .orElseThrow(IllegalStateException::new);
                result = this.tagService.setTags(name, tags, true);
                final TableDto currentTable = this.tableService
                    .get(name, GetTableServiceParameters.builder()
                        .includeInfo(true)
                        .includeDataMetadata(true)
                        .includeDefinitionMetadata(true)
                        .disableOnReadMetadataIntercetor(false)
                        .build())
                    .orElseThrow(IllegalStateException::new);
                this.eventBus.post(
                    new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldTable, currentTable)
                );
                return result;
            case MVIEW:
                if (!this.mViewService.exists(name)) {
                    throw new MetacatNotFoundException(name.toString());
                }
                final Optional<TableDto> oldView = this.mViewService.getOpt(name, GetTableServiceParameters.builder()
                    .includeInfo(true)
                    .includeDataMetadata(true)
                    .includeDefinitionMetadata(true)
                    .disableOnReadMetadataIntercetor(false)
                    .build()
                );
                if (oldView.isPresent()) {
                    result = this.tagService.setTags(name, tags, true);
                    final Optional<TableDto> currentView = this.mViewService
                        .getOpt(name, GetTableServiceParameters.builder()
                            .includeInfo(true)
                            .includeDataMetadata(true)
                            .includeDefinitionMetadata(true)
                            .disableOnReadMetadataIntercetor(false)
                            .build());
                    currentView.ifPresent(p ->
                        this.eventBus.post(
                            new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldView.get(),
                                currentView.get())
                        )
                    );
                    return result;
                }
                break;
            default:
                // Fixed: the message previously embedded a stray SLF4J-style "{}" placeholder.
                throw new MetacatNotFoundException("Unsupported qualifiedName type " + name);
        }
        return result;
}
/**
 * Replaces the tags on the given table and publishes a table-update event carrying the
 * before/after table snapshots.
 * TODO: remove after setTags api is adopted
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param tags         set of tags
 * @return set of tags
 */
@RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}",
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
        value = "Sets the tags on the given table",
        notes = "Sets the tags on the given table"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_CREATED,
                    message = "The tags were successfully created on the table"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
public Set<String> setTableTags(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "Set of tags", required = true)
        @RequestBody final Set<String> tags
) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
        );
        return this.requestWrapper.processRequest(
                name,
                "TagV1Resource.setTableTags",
                () -> {
                        // TODO: shouldn't this be in the tag service?
                        if (!this.tableService.exists(name)) {
                                throw new TableNotFoundException(name);
                        }
                        // Both the before and after snapshots use the same read parameters.
                        final GetTableServiceParameters snapshotParams = GetTableServiceParameters.builder()
                                .includeInfo(true)
                                .includeDataMetadata(true)
                                .includeDefinitionMetadata(true)
                                .disableOnReadMetadataIntercetor(false)
                                .build();
                        final TableDto oldTable = this.tableService.get(name, snapshotParams)
                                .orElseThrow(IllegalStateException::new);
                        final Set<String> result = this.tagService.setTags(name, tags, true);
                        final TableDto currentTable = this.tableService.get(name, snapshotParams)
                                .orElseThrow(IllegalStateException::new);
                        this.eventBus.post(
                                new MetacatUpdateTablePostEvent(
                                        name, metacatRequestContext, this, oldTable, currentTable)
                        );
                        return result;
                }
        );
}
/**
 * Removes tags from the given table and publishes a table-update event carrying the
 * before/after table snapshots.
 * TODO: remove after removeTags api is adopted
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param deleteAll    true if all tags need to be removed
 * @param tags         tags to be removed from the given table
 */
@RequestMapping(
        method = RequestMethod.DELETE,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}",
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
        position = 4,
        value = "Remove the tags from the given table",
        notes = "Remove the tags from the given table"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NO_CONTENT,
                    message = "The tags were successfully deleted from the table"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
public void removeTableTags(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "True if all tags need to be removed")
        @RequestParam(name = "all", defaultValue = "false") final boolean deleteAll,
        @ApiParam(value = "Tags to be removed from the given table")
        @Nullable @RequestBody(required = false) final Set<String> tags
) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
        );
        this.requestWrapper.processRequest(
                name,
                "TagV1Resource.removeTableTags",
                () -> {
                        //TODO: Business logic in API tier...
                        if (!this.tableService.exists(name)) {
                                // Clean up any orphaned tags before reporting the missing table.
                                this.tagService.delete(name, false);
                                throw new TableNotFoundException(name);
                        }
                        // Both the before and after snapshots use the same read parameters.
                        final GetTableServiceParameters snapshotParams = GetTableServiceParameters.builder()
                                .includeInfo(true)
                                .includeDataMetadata(true)
                                .includeDefinitionMetadata(true)
                                .disableOnReadMetadataIntercetor(false)
                                .build();
                        final TableDto oldTable = this.tableService.get(name, snapshotParams)
                                .orElseThrow(IllegalStateException::new);
                        this.tagService.removeTags(name, deleteAll, tags, true);
                        final TableDto currentTable = this.tableService.get(name, snapshotParams)
                                .orElseThrow(IllegalStateException::new);
                        this.eventBus.post(
                                new MetacatUpdateTablePostEvent(
                                        name, metacatRequestContext, this, oldTable, currentTable)
                        );
                        return null;
                }
        );
}
/**
 * Removes tags from the resource named in the request body.
 *
 * @param tagRemoveRequestDto request carrying the qualified name and the tags to remove
 */
@RequestMapping(
        method = RequestMethod.DELETE,
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
        value = "Remove the tags from the given resource",
        notes = "Remove the tags from the given resource"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NO_CONTENT,
                    message = "The tags were successfully deleted from the table"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
public void removeTags(
        @ApiParam(value = "Request containing the set of tags and qualifiedName", required = true)
        @RequestBody final TagRemoveRequestDto tagRemoveRequestDto
) {
        // NOTE(review): the request name below says "removeTableTags" although this endpoint
        // handles all resource types — kept as-is since metrics/dashboards may key on it.
        this.requestWrapper.processRequest(
                tagRemoveRequestDto.getName(),
                "TagV1Resource.removeTableTags",
                () -> {
                        this.removeResourceTags(tagRemoveRequestDto);
                        return null;
                }
        );
}
/**
 * Removes tags from the resource named in the request, dispatching on the qualified-name type.
 *
 * <p>For databases, tables and mviews a corresponding post-update event is published;
 * catalog tag removal publishes no event. If a table no longer exists its tags are cleaned
 * up before the not-found error is raised.
 *
 * @param tagRemoveRequestDto request carrying the qualified name and the tags to remove
 * @throws DatabaseNotFoundException if a database resource does not exist
 * @throws TableNotFoundException    if a table resource does not exist
 * @throws MetacatNotFoundException  if an mview does not exist or the name type is unsupported
 */
private void removeResourceTags(final TagRemoveRequestDto tagRemoveRequestDto) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final QualifiedName name = tagRemoveRequestDto.getName();
        switch (name.getType()) {
            case CATALOG:
                // catalog service will throw exception if not found
                this.catalogService.get(name, GetCatalogServiceParameters.builder()
                    .includeDatabaseNames(false).includeUserMetadata(false).build());
                this.tagService.removeTags(name, tagRemoveRequestDto.getDeleteAll(),
                    new HashSet<>(tagRemoveRequestDto.getTags()), true);
                break;
            case DATABASE:
                if (!this.databaseService.exists(name)) {
                    throw new DatabaseNotFoundException(name);
                }
                this.tagService.removeTags(name, tagRemoveRequestDto.getDeleteAll(),
                    new HashSet<>(tagRemoveRequestDto.getTags()), true);
                this.eventBus.post(
                    new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this)
                );
                break;
            case TABLE:
                if (!this.tableService.exists(name)) {
                    // Clean up any orphaned tags before reporting the missing table.
                    this.tagService.delete(name, false);
                    throw new TableNotFoundException(name);
                }
                // Snapshot the table before and after so the update event carries both states.
                final TableDto oldTable = this.tableService
                    .get(name, GetTableServiceParameters.builder()
                        .includeInfo(true)
                        .includeDataMetadata(true)
                        .includeDefinitionMetadata(true)
                        .disableOnReadMetadataIntercetor(false)
                        .build())
                    .orElseThrow(IllegalStateException::new);
                this.tagService.removeTags(name, tagRemoveRequestDto.getDeleteAll(),
                    new HashSet<>(tagRemoveRequestDto.getTags()), true);
                final TableDto currentTable = this.tableService
                    .get(name, GetTableServiceParameters.builder()
                        .includeInfo(true)
                        .includeDataMetadata(true)
                        .includeDefinitionMetadata(true)
                        .disableOnReadMetadataIntercetor(false)
                        .build())
                    .orElseThrow(IllegalStateException::new);
                this.eventBus.post(
                    new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldTable, currentTable)
                );
                break;
            case MVIEW:
                if (!this.mViewService.exists(name)) {
                    throw new MetacatNotFoundException(name.toString());
                }
                final Optional<TableDto> oldView = this.mViewService.getOpt(name, GetTableServiceParameters.builder()
                    .includeInfo(true)
                    .includeDataMetadata(true)
                    .includeDefinitionMetadata(true)
                    .disableOnReadMetadataIntercetor(false)
                    .build()
                );
                if (oldView.isPresent()) {
                    this.tagService.removeTags(name, tagRemoveRequestDto.getDeleteAll(),
                        new HashSet<>(tagRemoveRequestDto.getTags()), true);
                    final Optional<TableDto> currentView = this.mViewService
                        .getOpt(name, GetTableServiceParameters.builder()
                            .includeInfo(true)
                            .includeDataMetadata(true)
                            .includeDefinitionMetadata(true)
                            .disableOnReadMetadataIntercetor(false)
                            .build());
                    currentView.ifPresent(p ->
                        this.eventBus.post(
                            new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldView.get(),
                                currentView.get())
                        )
                    );
                }
                break;
            default:
                // Fixed: the message previously embedded a stray SLF4J-style "{}" placeholder.
                throw new MetacatNotFoundException("Unsupported qualifiedName type " + name);
        }
}
/**
 * Registers the custom editor that converts the "type" request parameter string into a
 * {@link QualifiedName.Type} for this controller's request bindings.
 *
 * @param dataBinder binder for the current request
 */
@InitBinder
private void bindsCustomRequestParamType(final WebDataBinder dataBinder) {
        dataBinder.registerCustomEditor(QualifiedName.Type.class, new QualifiedNameTypeConverter());
}
/**
 * Property editor that parses a request-parameter string into a {@link QualifiedName.Type}
 * via {@code QualifiedName.Type.fromValue}.
 */
private static class QualifiedNameTypeConverter extends PropertyEditorSupport {
    @Override
    public void setAsText(final String text) throws IllegalArgumentException {
        super.setValue(QualifiedName.Type.fromValue(text));
    }
}
}
| 2,111 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/MetacatController.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.api.v1;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.CreateCatalogDto;
import com.netflix.metacat.common.dto.DatabaseCreateRequestDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.server.api.v1.MetacatV1;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.api.RequestWrapper;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetCatalogServiceParameters;
import com.netflix.metacat.main.services.GetDatabaseServiceParameters;
import com.netflix.metacat.main.services.GetTableNamesServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.MViewService;
import com.netflix.metacat.main.services.MetacatServiceHelper;
import com.netflix.metacat.main.services.TableService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import io.swagger.annotations.ApiResponse;
import io.swagger.annotations.ApiResponses;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Nullable;
import javax.validation.Valid;
import java.net.HttpURLConnection;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.function.Supplier;
/**
* Metacat V1 API implementation.
*/
@RestController
@RequestMapping(
path = "/mds/v1",
produces = MediaType.APPLICATION_JSON_VALUE
)
@Api(
value = "MetacatV1",
description = "Federated metadata operations",
produces = MediaType.APPLICATION_JSON_VALUE,
consumes = MediaType.APPLICATION_JSON_VALUE
)
@Slf4j
@DependsOn("metacatCoreInitService")
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class MetacatController implements MetacatV1 {
// Catalog-level read operations (get catalog, list catalog names).
private final CatalogService catalogService;
// Database create/read/delete operations.
private final DatabaseService databaseService;
// Metacat view (mview) create/read/delete operations.
private final MViewService mViewService;
// Table create/read/delete operations.
private final TableService tableService;
// Wraps every request with name qualification, metrics and error translation.
private final RequestWrapper requestWrapper;
// Server configuration; supplies defaults for optional list flags.
private final Config config;
/**
 * Simple get on / to show API is up and available.
 * Responds with 204 No Content and no body.
 */
@RequestMapping(method = RequestMethod.GET)
@ResponseStatus(HttpStatus.NO_CONTENT)
public void index() {
        // TODO: Hypermedia
}
/**
 * Creates a new catalog. Catalog creation is not supported through this API and this
 * endpoint always fails with a not-supported error.
 *
 * @param createCatalogDto catalog
 */
@RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog",
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
        position = 3,
        value = "Creates a new catalog",
        notes = "Returns success if there were no errors creating the catalog"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_IMPLEMENTED,
                    message = "Not yet implemented"
            )
        }
)
public void createCatalog(@Valid @RequestBody final CreateCatalogDto createCatalogDto) {
        // Catalogs are registered through configuration, not through the API.
        throw new MetacatNotSupportedException("Create catalog is not supported.");
}
/**
 * Creates the given database in the given catalog, copying any optional attributes from
 * the request body.
 *
 * @param catalogName              catalog name
 * @param databaseName             database name
 * @param databaseCreateRequestDto database create request
 */
@RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}",
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
        position = 2,
        value = "Creates the given database in the given catalog",
        notes = "Given a catalog and a database name, creates the database in the catalog"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_CREATED,
                    message = "The database was created"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database cannot be located"
            )
        }
)
@Override
public void createDatabase(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The database information")
        @Nullable @RequestBody(required = false) final DatabaseCreateRequestDto databaseCreateRequestDto
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofDatabase(catalogName, databaseName)
        );
        this.requestWrapper.processRequest(
                name,
                "createDatabase",
                () -> {
                        final DatabaseDto dto = new DatabaseDto();
                        dto.setName(name);
                        if (databaseCreateRequestDto != null) {
                                // Copy optional creation attributes when a body was supplied.
                                dto.setUri(databaseCreateRequestDto.getUri());
                                dto.setMetadata(databaseCreateRequestDto.getMetadata());
                                dto.setDefinitionMetadata(databaseCreateRequestDto.getDefinitionMetadata());
                        }
                        this.databaseService.create(name, dto);
                        return null;
                }
        );
}
/**
 * Creates the given table after checking that the path table name matches the name in
 * the request body.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param table        TableDto with table details
 * @return created <code>TableDto</code> table
 */
@RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}",
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
        position = 2,
        value = "Creates a table",
        notes = "Creates the given table"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_CREATED,
                    message = "The table was created"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
@Override
public TableDto createTable(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "The table information", required = true)
        @Valid @RequestBody final TableDto table
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
        );
        if (MetacatServiceHelper.isIcebergTable(table)) {
                // Record the table type on the request context for downstream handling.
                MetacatContextManager.getContext().updateTableTypeMap(name, MetacatServiceHelper.ICEBERG_TABLE_TYPE);
        }
        log.info("Creating table: {} with info: {}", name, table);
        return this.requestWrapper.processRequest(
                name,
                "createTable",
                () -> {
                        final boolean namesMatch = table.getName() != null
                                && tableName.equalsIgnoreCase(table.getName().getTableName());
                        Preconditions.checkArgument(namesMatch, "Table name does not match the name in the table");
                        return this.tableService.create(name, table);
                }
        );
}
/**
 * Creates a metacat view — a staging table whose partitions refer to the parent table's
 * partition locations — optionally snapshotting the table's partitions into the view.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param viewName     view name
 * @param snapshot     boolean to snapshot or not
 * @param filter       filter expression to use
 * @return created <code>TableDto</code> mview
 */
@RequestMapping(
        method = RequestMethod.POST,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}",
        consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
        position = 2,
        value = "Creates a metacat view. A staging table that can contain partitions referring to the table partition "
                + "locations.",
        notes = "Creates the given metacat view. A staging table that can contain partitions referring to the table "
                + "partition locations."
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_CREATED,
                    message = "The mView was created"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
public TableDto createMView(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "The name of the view", required = true)
        @PathVariable("view-name") final String viewName,
        @ApiParam(
                value = "To snapshot a list of partitions of the table to this view. "
                        + "If true, it will restore the partitions from the table to this view."
        )
        @RequestParam(name = "snapshot", defaultValue = "false") final boolean snapshot,
        @ApiParam(value = "Filter expression string to use")
        @Nullable @RequestParam(value = "filter", required = false) final String filter
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
        );
        return this.requestWrapper.processRequest(
                name,
                "createMView",
                () -> {
                        // The mview service handles both creation and the optional partition snapshot.
                        return this.mViewService.createAndSnapshotPartitions(name, snapshot, filter);
                }
        );
}
/**
 * Deletes the given database from the given catalog.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 */
@RequestMapping(method = RequestMethod.DELETE, path = "/catalog/{catalog-name}/database/{database-name}")
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
        position = 4,
        value = "Deletes the given database from the given catalog",
        notes = "Given a catalog and database, deletes the database from the catalog"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_OK,
                    message = "Database was successfully deleted"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database cannot be located"
            )
        }
)
public void deleteDatabase(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofDatabase(catalogName, databaseName)
        );
        this.requestWrapper.processRequest(
                name,
                "deleteDatabase",
                () -> {
                        // The database service performs the actual deletion.
                        this.databaseService.delete(name);
                        return null;
                }
        );
}
/**
 * Deletes the given table and returns its last known state.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @return deleted <code>TableDto</code> table.
 */
@RequestMapping(
        method = RequestMethod.DELETE,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
        position = 4,
        value = "Delete table",
        notes = "Deletes the given table"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_OK,
                    message = "Table was successfully deleted"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or table cannot be located"
            )
        }
)
@Override
public TableDto deleteTable(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofTable(catalogName, databaseName, tableName)
        );
        return this.requestWrapper.processRequest(
                name,
                "deleteTable",
                () -> {
                        // The second argument disables the mview-check shortcut (same as before).
                        return this.tableService.deleteAndReturn(name, false);
                }
        );
}
/**
 * Deletes the given metacat view and returns its last known state.
 *
 * @param catalogName  catalog name
 * @param databaseName database name
 * @param tableName    table name
 * @param viewName     view name
 * @return deleted <code>TableDto</code> mview.
 */
@RequestMapping(
        method = RequestMethod.DELETE,
        path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
        position = 4,
        value = "Delete metacat view",
        notes = "Deletes the given metacat view"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_OK,
                    message = "View was successfully deleted"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database or metacat view cannot be located"
            )
        }
)
public TableDto deleteMView(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "The name of the table", required = true)
        @PathVariable("table-name") final String tableName,
        @ApiParam(value = "The name of the metacat view", required = true)
        @PathVariable("view-name") final String viewName
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
        );
        return this.requestWrapper.processRequest(
                name,
                "deleteMView",
                () -> this.mViewService.deleteAndReturn(name)
        );
}
/**
 * Convenience overload: fetches the catalog including database names and user metadata.
 *
 * @param catalogName the catalog name
 * @return the catalog
 */
@Override
public CatalogDto getCatalog(final String catalogName) {
        return getCatalog(catalogName, true, true);
}
/**
 * Fetches the catalog by name. When {@code includeDatabaseNames} is not supplied, the
 * server-wide default from the configuration decides whether database names are listed.
 *
 * @param catalogName          catalog name
 * @param includeDatabaseNames whether to include the list of database names
 * @param includeUserMetadata  whether to include user metadata in the response
 * @return catalog
 */
@RequestMapping(method = RequestMethod.GET, path = "/catalog/{catalog-name}")
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
        position = 2,
        value = "Databases for the requested catalog",
        notes = "The list of databases that belong to the given catalog"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_OK,
                    message = "The catalog is returned"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog cannot be located"
            )
        }
)
@Override
public CatalogDto getCatalog(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "Whether to include list of database names")
        @Nullable @RequestParam(name = "includeDatabaseNames", required = false) final Boolean includeDatabaseNames,
        @ApiParam(value = "Whether to include user metadata information to the response")
        @RequestParam(name = "includeUserMetadata", defaultValue = "true") final boolean includeUserMetadata) {
        final QualifiedName name = this.requestWrapper.qualifyName(() -> QualifiedName.ofCatalog(catalogName));
        return this.requestWrapper.processRequest(
                name,
                "getCatalog",
                () -> {
                        // Fall back to the configured default when the caller did not specify the flag.
                        final boolean listDatabases = includeDatabaseNames == null
                                ? config.listDatabaseNameByDefaultOnGetCatalog()
                                : includeDatabaseNames;
                        return this.catalogService.get(name, GetCatalogServiceParameters.builder()
                                .includeDatabaseNames(listDatabases)
                                .includeUserMetadata(includeUserMetadata)
                                .build());
                }
        );
}
/**
 * Lists the names and types of all catalogs registered with this server.
 *
 * @return registered catalogs.
 */
@RequestMapping(method = RequestMethod.GET, path = "/catalog")
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
        position = 1,
        value = "List registered catalogs",
        notes = "The names and types of all catalogs registered with this server"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_OK,
                    message = "The catalogs are returned"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "No catalogs are registered with the server"
            )
        }
)
public List<CatalogMappingDto> getCatalogNames() {
        // A synthetic catalog name is used purely for request tagging/metrics.
        final QualifiedName name = QualifiedName.ofCatalog("getCatalogNames");
        return this.requestWrapper.processRequest(
                name,
                "getCatalogNames",
                () -> this.catalogService.getCatalogNames());
}
/**
 * Fetches the database, optionally with its table names. When {@code includeTableNames}
 * is not supplied, the server-wide default from the configuration decides whether table
 * names are listed.
 *
 * @param catalogName         catalog name
 * @param databaseName        database name
 * @param includeUserMetadata true if details should include user metadata
 * @param includeTableNames   whether to include the list of table names
 * @return database with details
 */
@RequestMapping(method = RequestMethod.GET, path = "/catalog/{catalog-name}/database/{database-name}")
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
        position = 1,
        value = "Tables for the requested database",
        notes = "The list of tables that belong to the given catalog and database"
)
@ApiResponses(
        {
            @ApiResponse(
                    code = HttpURLConnection.HTTP_OK,
                    message = "The database is returned"
            ),
            @ApiResponse(
                    code = HttpURLConnection.HTTP_NOT_FOUND,
                    message = "The requested catalog or database cannot be located"
            )
        }
)
@Override
public DatabaseDto getDatabase(
        @ApiParam(value = "The name of the catalog", required = true)
        @PathVariable("catalog-name") final String catalogName,
        @ApiParam(value = "The name of the database", required = true)
        @PathVariable("database-name") final String databaseName,
        @ApiParam(value = "Whether to include user metadata information to the response")
        @RequestParam(name = "includeUserMetadata", defaultValue = "true") final boolean includeUserMetadata,
        @ApiParam(value = "Whether to include list of table names")
        @Nullable @RequestParam(name = "includeTableNames", required = false) final Boolean includeTableNames
) {
        final QualifiedName name = this.requestWrapper.qualifyName(
                () -> QualifiedName.ofDatabase(catalogName, databaseName)
        );
        // Tag the request with whether the caller explicitly passed the flag (for metrics).
        final boolean tableNamesPassed = includeTableNames != null;
        return this.requestWrapper.processRequest(
                name,
                "getDatabase",
                Collections.singletonMap("includeTableNamesPassed", tableNamesPassed ? "true" : "false"),
                () -> {
                        final boolean listTableNames = includeTableNames == null
                                ? config.listTableNamesByDefaultOnGetDatabase()
                                : includeTableNames;
                        return databaseService.get(name,
                                GetDatabaseServiceParameters.builder()
                                        .includeUserMetadata(includeUserMetadata)
                                        .includeTableNames(listTableNames)
                                        .disableOnReadMetadataIntercetor(false)
                                        .build());
                }
        );
}
/**
* Get the table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
* @param includeInfo true if the details need to be included
* @param includeDefinitionMetadata true if the definition metadata to be included
* @param includeDataMetadata true if the data metadata to be included
* @return table
*/
@RequestMapping(
method = RequestMethod.GET,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}"
)
@ApiOperation(
position = 1,
value = "Table information",
notes = "Table information for the given table name under the given catalog and database")
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "The table is returned"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
@Override
public TableDto getTable(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(
value = "Whether to include the core information about the table (location, serde, columns) in "
+ "the response. You would only say false here if you only want metadata."
)
@RequestParam(name = "includeInfo", defaultValue = "true") final boolean includeInfo,
@ApiParam(value = "Whether to include user definition metadata information to the response")
@RequestParam(
name = "includeDefinitionMetadata",
defaultValue = "true"
) final boolean includeDefinitionMetadata,
@ApiParam(value = "Whether to include user data metadata information to the response")
@RequestParam(name = "includeDataMetadata", defaultValue = "true") final boolean includeDataMetadata,
@ApiParam(value = "Whether to include more info details to the response. This value is considered only if "
+ "includeInfo is true.")
@RequestParam(name = "includeInfoDetails", defaultValue = "false") final boolean includeInfoDetails,
@ApiParam(value = "Whether to include only the metadata location in the response")
@RequestParam(
name = "includeMetadataLocationOnly",
defaultValue = "false") final boolean includeMetadataLocationOnly
) {
final Supplier<QualifiedName> qualifiedNameSupplier =
() -> QualifiedName.ofTable(catalogName, databaseName, tableName);
final QualifiedName name = this.requestWrapper.qualifyName(qualifiedNameSupplier);
return this.requestWrapper.processRequest(
name,
"getTable",
ImmutableMap.<String, String>builder()
.put("includeInfo", String.valueOf(includeInfo))
.put("includeDefinitionMetadata", String.valueOf(includeDefinitionMetadata))
.put("includeDataMetadata", String.valueOf(includeDataMetadata))
.put("includeMetadataFromConnector", String.valueOf(includeInfoDetails))
.put("includeMetadataLocationOnly", String.valueOf(includeMetadataLocationOnly))
.build(),
() -> {
final Optional<TableDto> table = this.tableService.get(
name,
GetTableServiceParameters.builder()
.includeInfo(includeInfo)
.includeDefinitionMetadata(includeDefinitionMetadata)
.includeDataMetadata(includeDataMetadata)
.disableOnReadMetadataIntercetor(false)
.includeMetadataFromConnector(includeInfoDetails)
.includeMetadataLocationOnly(includeMetadataLocationOnly)
.useCache(true)
.build()
);
final TableDto tableDto = table.orElseThrow(() -> new TableNotFoundException(name));
// Set the name to whatever the request was for because
// for aliases, this could've been set to the original name
tableDto.setName(qualifiedNameSupplier.get());
return tableDto;
}
);
}
/**
* Check if the table exists.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name.
*/
@RequestMapping(
method = RequestMethod.HEAD,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}"
)
@ApiOperation(
position = 1,
value = "Table information",
notes = "Table information for the given table name under the given catalog and database")
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "Table exists"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "Table does not exists"
)
}
)
@Override
public void tableExists(@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName) {
final Supplier<QualifiedName> qualifiedNameSupplier =
() -> QualifiedName.ofTable(catalogName, databaseName, tableName);
final QualifiedName name = this.requestWrapper.qualifyName(qualifiedNameSupplier);
this.requestWrapper.processRequest(
name,
"exists",
() -> {
if (!tableService.exists(name)) {
throw new TableNotFoundException(name);
}
return null;
}
);
}
@RequestMapping(
method = RequestMethod.GET,
path = "/catalog/{catalog-name}/table-names"
)
@ApiOperation(
value = "Filtered list of table names",
notes = "Filtered list of table names for the given catalog. The filter expression pattern depends on the "
+ "catalog")
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "List of table names is returned"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog cannot be located"
)
}
)
@Override
public List<QualifiedName> getTableNames(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "filter expression")
@RequestParam(name = "filter") final String filter,
@ApiParam(value = "Size of the list")
@Nullable @RequestParam(name = "limit", required = false, defaultValue = "-1") final Integer limit) {
final Supplier<QualifiedName> qualifiedNameSupplier =
() -> QualifiedName.ofCatalog(catalogName);
final QualifiedName name = this.requestWrapper.qualifyName(qualifiedNameSupplier);
return this.requestWrapper.processRequest(
name,
"getTableNames",
() -> {
return this.tableService.getQualifiedNames(
name,
GetTableNamesServiceParameters.builder()
.filter(filter)
.limit(limit)
.build()
);
}
);
}
@RequestMapping(
method = RequestMethod.GET,
path = "/catalog/{catalog-name}/database/{database-name}/table-names"
)
@ApiOperation(
value = "Filtered list of table names",
notes = "Filtered list of table names for the given database. The filter expression pattern depends on the "
+ "catalog")
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "List of table names is returned"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog cannot be located"
)
}
)
@Override
public List<QualifiedName> getTableNames(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "filter expression")
@RequestParam(name = "filter") final String filter,
@ApiParam(value = "Size of the list")
@Nullable @RequestParam(name = "limit", required = false, defaultValue = "-1") final Integer limit) {
final Supplier<QualifiedName> qualifiedNameSupplier =
() -> QualifiedName.ofDatabase(catalogName, databaseName);
final QualifiedName name = this.requestWrapper.qualifyName(qualifiedNameSupplier);
return this.requestWrapper.processRequest(
name,
"getTableNames",
() -> {
return this.tableService.getQualifiedNames(
name,
GetTableNamesServiceParameters.builder()
.filter(filter)
.limit(limit)
.build()
);
}
);
}
/**
* List of metacat view names.
*
* @param catalogName catalog name
* @return list of metacat view names.
*/
@RequestMapping(method = RequestMethod.GET, path = "/catalog/{catalog-name}/mviews")
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
position = 1,
value = "List of metacat views",
notes = "List of metacat views for a catalog"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "The list of views is returned"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog cannot be located"
)
}
)
public List<NameDateDto> getMViews(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName
) {
final QualifiedName name = this.requestWrapper.qualifyName(() -> QualifiedName.ofCatalog(catalogName));
return this.requestWrapper.processRequest(
name,
"getMViews",
() -> mViewService.list(name)
);
}
/**
* List of metacat view names.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @return List of metacat view names.
*/
@RequestMapping(
method = RequestMethod.GET,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mviews"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
position = 1,
value = "List of metacat views",
notes = "List of metacat views for a catalog"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "The list of views is returned"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog cannot be located"
)
}
)
public List<NameDateDto> getMViews(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, tableName)
);
return this.requestWrapper.processRequest(
name,
"getMViews",
() -> this.mViewService.list(name)
);
}
/**
* Get metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @return metacat view
*/
@RequestMapping(
method = RequestMethod.GET,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}"
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
position = 1,
value = "Metacat View information",
notes = "View information for the given view name under the given catalog and database"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "The view is returned"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
public TableDto getMView(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "The name of the view", required = true)
@PathVariable("view-name") final String viewName
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"getMView",
() -> {
final Optional<TableDto> table = this.mViewService.getOpt(name,
GetTableServiceParameters.builder()
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.build());
return table.orElseThrow(() -> new MetacatNotFoundException("Unable to find view: " + name));
}
);
}
/**
* Rename table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param newTableName new table name
*/
@RequestMapping(
method = RequestMethod.POST,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/rename"
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
position = 3,
value = "Rename table",
notes = "Renames the given table with the new name")
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "Table successfully renamed"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
@Override
public void renameTable(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "The name of the table", required = true)
@RequestParam("newTableName") final String newTableName
) {
final QualifiedName oldName = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, tableName)
);
final QualifiedName newName = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, newTableName)
);
this.requestWrapper.processRequest(
oldName,
"renameTable",
() -> {
this.tableService.rename(oldName, newName, false);
return null;
}
);
}
/**
* Updates an existing catalog.
*
* @param catalogName catalog name
* @param createCatalogDto catalog
*/
@RequestMapping(
method = RequestMethod.PUT,
path = "/catalog/{catalog-name}",
consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
position = 4,
value = "Updates an existing catalog",
notes = "Returns success if there were no errors updating the catalog"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "Catalog successfully updated"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "No catalogs are registered with the server"
)
}
)
public void updateCatalog(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The metadata to update in the catalog", required = true)
@RequestBody final CreateCatalogDto createCatalogDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(() -> QualifiedName.ofCatalog(catalogName));
this.requestWrapper.processRequest(
name,
"updateCatalog",
() -> {
createCatalogDto.setName(name);
this.catalogService.update(name, createCatalogDto);
return null;
}
);
}
/**
* Updates the given database in the given catalog.
*
* @param catalogName catalog name.
* @param databaseName database name.
* @param databaseUpdateRequestDto database
*/
@RequestMapping(
method = RequestMethod.PUT,
path = "/catalog/{catalog-name}/database/{database-name}",
consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.NO_CONTENT)
@ApiOperation(
position = 3,
value = "Updates the given database in the given catalog",
notes = "Given a catalog and a database name, updates the database in the catalog"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "Database successfully updated"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database cannot be located"
)
}
)
public void updateDatabase(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The database information", required = true)
@RequestBody final DatabaseCreateRequestDto databaseUpdateRequestDto
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofDatabase(catalogName, databaseName)
);
this.requestWrapper.processRequest(
name,
"updateDatabase",
() -> {
final DatabaseDto newDto = new DatabaseDto();
newDto.setName(name);
newDto.setUri(databaseUpdateRequestDto.getUri());
newDto.setMetadata(databaseUpdateRequestDto.getMetadata());
newDto.setDefinitionMetadata(databaseUpdateRequestDto.getDefinitionMetadata());
this.databaseService.update(name, newDto);
return null;
}
);
}
/**
* Update metacat view.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param viewName view name
* @param table view
* @return updated metacat view
*/
@RequestMapping(
method = RequestMethod.PUT,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}",
consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.OK)
@ApiOperation(
position = 3,
value = "Update mview",
notes = "Updates the given mview"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "View successfully updated"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
public TableDto updateMView(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "The name of the view", required = true)
@PathVariable("view-name") final String viewName,
@ApiParam(value = "The view information", required = true)
@RequestBody final TableDto table
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)
);
return this.requestWrapper.processRequest(
name,
"getMView",
() -> this.mViewService.updateAndReturn(name, table)
);
}
/**
* Update table.
*
* @param catalogName catalog name
* @param databaseName database name
* @param tableName table name
* @param table table
* @return table
*/
@RequestMapping(
method = RequestMethod.PUT,
path = "/catalog/{catalog-name}/database/{database-name}/table/{table-name}",
consumes = MediaType.APPLICATION_JSON_VALUE
)
@ApiOperation(
position = 3,
value = "Update table",
notes = "Updates the given table"
)
@ApiResponses(
{
@ApiResponse(
code = HttpURLConnection.HTTP_OK,
message = "Table successfully updated"
),
@ApiResponse(
code = HttpURLConnection.HTTP_NOT_FOUND,
message = "The requested catalog or database or table cannot be located"
)
}
)
@Override
public TableDto updateTable(
@ApiParam(value = "The name of the catalog", required = true)
@PathVariable("catalog-name") final String catalogName,
@ApiParam(value = "The name of the database", required = true)
@PathVariable("database-name") final String databaseName,
@ApiParam(value = "The name of the table", required = true)
@PathVariable("table-name") final String tableName,
@ApiParam(value = "The table information", required = true)
@RequestBody final TableDto table
) {
final QualifiedName name = this.requestWrapper.qualifyName(
() -> QualifiedName.ofTable(catalogName, databaseName, tableName)
);
return this.requestWrapper.processRequest(
name,
"updateTable",
() -> {
Preconditions.checkArgument(table.getName() != null
&& tableName.equalsIgnoreCase(table.getName().getTableName()
),
"Table name does not match the name in the table"
);
return this.tableService.updateAndReturn(name, table);
}
);
}
}
| 2,112 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/SearchController.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api.v1;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.main.api.RequestWrapper;
import com.netflix.metacat.main.services.search.ElasticSearchUtil;
import io.swagger.annotations.ApiParam;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.DependsOn;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import java.util.List;
/**
 * Search API backed by Elasticsearch; only active when the
 * {@code metacat.elasticsearch.enabled} property is set.
 */
@ConditionalOnProperty(value = "metacat.elasticsearch.enabled", havingValue = "true")
@RestController
@RequestMapping(
    path = "/mds/v1/search",
    produces = MediaType.APPLICATION_JSON_VALUE
)
@DependsOn("metacatCoreInitService")
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class SearchController {
    private final ElasticSearchUtil elasticSearchUtil;
    private final RequestWrapper requestWrapper;

    /**
     * Searches the table index for the given search string.
     *
     * @param searchString search string
     * @return list of matching tables
     */
    @RequestMapping(method = RequestMethod.GET, path = "/table")
    @ResponseStatus(HttpStatus.OK)
    public List<TableDto> searchTables(
        @ApiParam(value = "The query parameter", required = true)
        @RequestParam(name = "q") final String searchString
    ) {
        return requestWrapper.processRequest(
            "SearchMetacatV1Resource.searchTables",
            () -> elasticSearchUtil.simpleSearch(searchString)
        );
    }
}
| 2,113 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/ResolverController.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api.v1;
import com.netflix.metacat.common.dto.ResolveByUriRequestDto;
import com.netflix.metacat.common.dto.ResolveByUriResponseDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
/**
 * Resolver V1 Implementation as Jersey Resource.
 *
 * @author zhenl
 * @since 1.0.0
 */
@RestController
@RequestMapping(
    path = "/mds/v1/resolver",
    produces = MediaType.APPLICATION_JSON_VALUE
)
@Api(
    value = "ResolverV1",
    description = "Metadata resolver operations",
    produces = MediaType.APPLICATION_JSON_VALUE,
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@DependsOn("metacatCoreInitService")
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class ResolverController {
    private final TableService tableService;
    private final PartitionService partitionService;

    /**
     * Resolves the URI in the request body to the qualified names that reference it.
     *
     * @param resolveByUriRequestDto resolveByUriRequestDto
     * @param prefixSearch           search by prefix flag
     * @return the qualified names of tables and partitions containing the URI
     */
    @RequestMapping(method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_VALUE)
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        value = "Returns the list of qualified names of tables and partitions containing the given URI path",
        notes = "Returns the list of qualified names of tables and partitions containing the given URI path"
    )
    public ResolveByUriResponseDto resolveByUri(
        @ApiParam(value = "do prefix search for URI")
        @RequestParam(name = "prefixSearch", defaultValue = "false") final boolean prefixSearch,
        @RequestBody final ResolveByUriRequestDto resolveByUriRequestDto
    ) {
        final String uri = resolveByUriRequestDto.getUri();
        final ResolveByUriResponseDto response = new ResolveByUriResponseDto();
        response.setTables(tableService.getQualifiedNames(uri, prefixSearch));
        response.setPartitions(partitionService.getQualifiedNames(uri, prefixSearch));
        return response;
    }

    /**
     * Checks whether the given URI is referenced by more than one table or partition.
     *
     * @param prefixSearch           search by prefix flag
     * @param resolveByUriRequestDto resolveByUriRequestDto
     */
    @RequestMapping(
        method = RequestMethod.POST,
        path = "/isUriUsedMoreThanOnce",
        consumes = MediaType.APPLICATION_JSON_VALUE
    )
    @ResponseStatus(HttpStatus.NO_CONTENT)
    @ApiOperation(
        position = 1,
        value = "Returns status 204 if the given URI is being referred more than once."
            + " Returns status 404 if the given URI is not found or not being referred more than once.",
        notes = "Returns status 204 if the given URI is being referred more than once."
            + " Returns status 404 if the given URI is not found or not being referred more than once.")
    public void isUriUsedMoreThanOnce(
        @ApiParam(value = "do prefix search for URI", defaultValue = "false")
        @RequestParam(name = "prefixSearch", defaultValue = "false") final Boolean prefixSearch,
        @RequestBody final ResolveByUriRequestDto resolveByUriRequestDto
    ) {
        final String uri = resolveByUriRequestDto.getUri();
        int referenceCount = tableService.getQualifiedNames(uri, prefixSearch).size();
        // Only consult partitions when tables alone do not already prove multiple references.
        if (referenceCount < 2) {
            referenceCount += partitionService.getQualifiedNames(uri, prefixSearch).size();
        }
        if (referenceCount <= 1) {
            throw new MetacatNotFoundException("URI not found more than once");
        }
    }
}
| 2,114 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* V1 API Controllers.
*
* @author tgianos
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.api.v1;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,115 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/api/v1/MetadataController.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.api.v1;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DataMetadataDto;
import com.netflix.metacat.common.dto.DataMetadataGetRequestDto;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.api.RequestWrapper;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.MetacatServiceHelper;
import com.netflix.metacat.main.services.MetadataService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Optional;
import java.util.Set;
/**
 * Metadata V1 API implementation.
 *
 * @author amajumdar
 */
@RestController
@RequestMapping(
    path = "/mds/v1/metadata",
    produces = MediaType.APPLICATION_JSON_VALUE
)
@Api(
    value = "MetadataV1",
    description = "Federated user metadata operations",
    produces = MediaType.APPLICATION_JSON_VALUE,
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@DependsOn("metacatCoreInitService")
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class MetadataController {
    private final UserMetadataService userMetadataService;
    // NOTE(review): not referenced in this class; kept because removing it would change
    // the Lombok-generated constructor that Spring uses for injection.
    private final MetacatServiceHelper helper;
    private final MetadataService metadataService;
    private final RequestWrapper requestWrapper;

    /**
     * Returns the data metadata for the URI in the request.
     *
     * @param metadataGetRequestDto metadata request
     * @return data metadata, or {@code null} when the request has no URI or no metadata exists for it
     */
    @RequestMapping(method = RequestMethod.POST, path = "/data", consumes = MediaType.APPLICATION_JSON_VALUE)
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        position = 1,
        value = "Returns the data metadata",
        notes = "Returns the data metadata"
    )
    public DataMetadataDto getDataMetadata(@RequestBody final DataMetadataGetRequestDto metadataGetRequestDto) {
        return this.requestWrapper.processRequest(
            "getDataMetadata",
            () -> {
                DataMetadataDto result = null;
                if (metadataGetRequestDto.getUri() != null) {
                    final Optional<ObjectNode> o
                        = this.userMetadataService.getDataMetadata(metadataGetRequestDto.getUri());
                    if (o.isPresent()) {
                        result = new DataMetadataDto();
                        result.setDataMetadata(o.get());
                        result.setUri(metadataGetRequestDto.getUri());
                    }
                }
                return result;
            }
        );
    }

    /**
     * Returns the list of definition metadata. Client should be aware that
     * this api does not apply the metadata read interceptor,
     * it queries the original results from database. The definition metadata results from this API can
     * be different from the table get API.
     * TODO: we need to find a way to address the interceptor application or remove this API.
     *
     * @param sortBy         Sort the list by this value
     * @param sortOrder      Sorting order to use
     * @param offset         Offset of the list returned
     * @param limit          Size of the list
     * @param lifetime       has lifetime set
     * @param type           Type of the metadata item. Values: database, table, partition
     * @param name           Text that matches the name of the metadata (accepts sql wildcards)
     * @param dataProperties Set of data property names.
     *                       Filters the returned list that only contains the given property names
     * @return list of definition metadata
     */
    @RequestMapping(method = RequestMethod.GET, path = "/definition/list")
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        position = 2,
        value = "Returns the definition metadata",
        notes = "Returns the definition metadata"
    )
    public List<DefinitionMetadataDto> getDefinitionMetadataList(
        @ApiParam(value = "Sort the list by this value")
        @Nullable @RequestParam(name = "sortBy", required = false) final String sortBy,
        @ApiParam(value = "Sorting order to use")
        @Nullable @RequestParam(name = "sortOrder", required = false) final SortOrder sortOrder,
        @ApiParam(value = "Offset of the list returned")
        @Nullable @RequestParam(name = "offset", required = false) final Integer offset,
        @ApiParam(value = "Size of the list")
        @Nullable @RequestParam(name = "limit", required = false) final Integer limit,
        @ApiParam(value = "has lifetime set", defaultValue = "false")
        @RequestParam(name = "lifetime", defaultValue = "false") final boolean lifetime,
        @ApiParam(value = "Type of the metadata item. Values: database, table, partition")
        @Nullable @RequestParam(name = "type", required = false) final String type,
        @ApiParam(value = "Text that matches the name of the metadata (accepts sql wildcards)")
        @Nullable @RequestParam(name = "name", required = false) final String name,
        @ApiParam(
            value = "Set of data property names. Filters the returned list that only contains the given property names"
        )
        @Nullable @RequestParam(name = "data-property", required = false) final Set<String> dataProperties
    ) {
        // Copy the request-bound set before adding "lifetime" so the caller's
        // collection is never mutated (the original added into dataProperties directly).
        final Set<String> localDataProperties =
            dataProperties != null ? Sets.newHashSet(dataProperties) : Sets.newHashSet();
        if (lifetime) {
            // "lifetime" is modeled as just another data property name in the search.
            localDataProperties.add("lifetime");
        }
        return requestWrapper.processRequest(
            "getDefinitionMetadataList",
            () -> this.userMetadataService.searchDefinitionMetadata(
                localDataProperties,
                type,
                name,
                getTableDto(name),
                sortBy,
                sortOrder != null ? sortOrder.name() : null,
                offset,
                limit
            )
        );
    }

    /**
     * Resolves the table for a fully qualified table name so the search can match
     * against its stored info; returns {@code null} for null/non-table names or a
     * missing table.
     *
     * @param name possibly-null qualified name string
     * @return the table dto or {@code null}
     */
    private TableDto getTableDto(@Nullable final String name) {
        if (name == null) {
            return null;
        }
        final QualifiedName qualifiedName = QualifiedName.fromString(name);
        if (!qualifiedName.isTableDefinition()) {
            return null;
        }
        // Read the raw table (metadata read interceptor disabled) purely to refine the search.
        return this.metadataService.getTableService().get(qualifiedName, GetTableServiceParameters
            .builder().disableOnReadMetadataIntercetor(true)
            .includeInfo(true)
            .includeDefinitionMetadata(false)
            .includeDataMetadata(false)
            .build())
            .orElse(null);
    }

    /**
     * Returns the list of qualified names owned by the given owners.
     *
     * @param owners set of owners
     * @return the list of qualified names owned by the given owners
     */
    @RequestMapping(method = RequestMethod.GET, path = "/searchByOwners")
    @ResponseStatus(HttpStatus.OK)
    @ApiOperation(
        position = 3,
        value = "Returns the qualified names owned by the given owners",
        notes = "Returns the qualified names owned by the given owners"
    )
    public List<QualifiedName> searchByOwners(
        @ApiParam(value = "Set of owners", required = true)
        @RequestParam("owner") final Set<String> owners
    ) {
        return this.requestWrapper.processRequest(
            "searchByOwners",
            () -> userMetadataService.searchByOwners(owners)
        );
    }

    /**
     * Delete the definition metadata for the given name.
     *
     * @param name  Name of definition metadata to be deleted
     * @param force If true, deletes the metadata without checking if the database/table/partition exists
     */
    @RequestMapping(method = RequestMethod.DELETE, path = "/definition")
    @ResponseStatus(HttpStatus.NO_CONTENT)
    @ApiOperation(
        position = 4,
        value = "Deletes the given definition metadata"
    )
    public void deleteDefinitionMetadata(
        @ApiParam(value = "Name of definition metadata to be deleted", required = true)
        @RequestParam(name = "name") final String name,
        @ApiParam(value = "If true, deletes the metadata without checking if the database/table/partition exists")
        @RequestParam(name = "force", defaultValue = "false") final boolean force
    ) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        requestWrapper.processRequest(
            "deleteDefinitionMetadata",
            () -> {
                metadataService.deleteDefinitionMetadata(QualifiedName.fromString(name), force, metacatRequestContext);
                return null;
            }
        );
    }

    /**
     * Deletes the data metadata marked for deletion.
     */
    @RequestMapping(method = RequestMethod.DELETE, path = "/data/cleanup")
    @ResponseStatus(HttpStatus.NO_CONTENT)
    @ApiOperation(
        hidden = true,
        value = "Admin API to delete obsolete data metadata"
    )
    public void cleanUpDeletedDataMetadata() {
        this.metadataService.cleanUpDeletedDataMetadata();
    }

    /**
     * Deletes the obsolete metadata.
     */
    @RequestMapping(method = RequestMethod.DELETE, path = "/definition/cleanup")
    @ResponseStatus(HttpStatus.NO_CONTENT)
    @ApiOperation(
        hidden = true,
        value = "Admin API to delete obsolete metadata"
    )
    public void cleanUpObsoleteMetadata() {
        this.metadataService.cleanUpObsoleteDefinitionMetadata();
    }
}
| 2,116 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/IcebergTableEventHandler.java
|
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.events.AsyncListener;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatUpdateIcebergTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;
/**
* Handler for Iceberg table specific events.
*/
@Slf4j
@Component
@AsyncListener
public class IcebergTableEventHandler {
private final TableService tableService;
private final MetacatEventBus eventBus;
private final Registry registry;
/**
* Constructor.
*
* @param tableService The table service.
* @param eventBus The metacat event bus.
* @param registry The registry.
*/
@Autowired
public IcebergTableEventHandler(
final TableService tableService,
final MetacatEventBus eventBus,
final Registry registry
) {
this.tableService = tableService;
this.eventBus = eventBus;
this.registry = registry;
}
/**
* The update table event handler.
*
* @param event The event.
*/
@EventListener
public void metacatUpdateTableEventHandler(final MetacatUpdateIcebergTablePostEvent event) {
final QualifiedName name = event.getName();
final TableDto tableDto = event.getRequestTable();
TableDto updatedDto = tableDto;
try {
updatedDto = tableService.get(name,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception ex) {
handleException(name, "getTable", ex);
}
try {
eventBus.post(new MetacatUpdateTablePostEvent(event.getName(), event.getRequestContext(),
this, event.getOldTable(),
updatedDto, updatedDto != tableDto));
} catch (Exception ex) {
handleException(name, "postEvent", ex);
}
}
private void handleException(final QualifiedName name,
final String request,
final Exception ex) {
log.warn("Failed {} for table {}. Error: {}", request, name, ex.getMessage());
registry.counter(registry.createId(
Metrics.CounterTableUpdateIgnoredException.getMetricName()).withTags(name.parts())
.withTag("request", request)).increment();
}
}
| 2,117 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/GetTableServiceParameters.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services;
import lombok.Builder;
import lombok.Value;
/**
 * Parameters controlling what a table "get" loads and returns
 * (used by {@code TableService.get}).
 *
 * @author zhenl
 * @since 1.2.0
 */
@Value
@Builder
public class GetTableServiceParameters {
    // NOTE: field order is significant — Lombok @Value generates the all-args
    // constructor in declaration order, so do not reorder these fields.
    private final boolean includeInfo;
    private final boolean includeDefinitionMetadata;
    private final boolean includeDataMetadata;
    // NOTE(review): "Intercetor" is a typo for "Interceptor", but the generated
    // builder/getter names are part of the public API — renaming would break callers.
    private final boolean disableOnReadMetadataIntercetor;
    private final boolean useCache;
    private final boolean includeMetadataFromConnector;
    private final boolean includeMetadataLocationOnly;
}
| 2,118 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/MetacatServiceHelper.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.BaseDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateMViewPreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePreEvent;
import java.util.List;
/**
 * Generic Service helper. Routes a qualified name to the matching service and
 * posts the corresponding pre/post events for update and delete operations.
 *
 * @author amajumdar
 */
public class MetacatServiceHelper {
    /**
     * Defines the table type.
     */
    public static final String PARAM_TABLE_TYPE = "table_type";
    /**
     * Iceberg table type.
     */
    public static final String ICEBERG_TABLE_TYPE = "ICEBERG";
    private final DatabaseService databaseService;
    private final TableService tableService;
    private final PartitionService partitionService;
    private final MViewService mViewService;
    private final MetacatEventBus eventBus;

    /**
     * Constructor.
     *
     * @param databaseService  database service
     * @param tableService     table service
     * @param partitionService partition service
     * @param mViewService     mview service
     * @param eventBus         event bus
     */
    public MetacatServiceHelper(
        final DatabaseService databaseService,
        final TableService tableService,
        final PartitionService partitionService,
        final MViewService mViewService,
        final MetacatEventBus eventBus
    ) {
        this.databaseService = databaseService;
        this.tableService = tableService;
        this.partitionService = partitionService;
        this.mViewService = mViewService;
        this.eventBus = eventBus;
    }

    /**
     * Get the relevant service for the given qualified name.
     * Checked most-specific first (partition, view, table, database).
     *
     * @param name name
     * @return service
     * @throws IllegalArgumentException if the name matches none of the known levels
     */
    public MetacatService getService(final QualifiedName name) {
        final MetacatService result;
        if (name.isPartitionDefinition()) {
            result = partitionService;
        } else if (name.isViewDefinition()) {
            result = mViewService;
        } else if (name.isTableDefinition()) {
            result = tableService;
        } else if (name.isDatabaseDefinition()) {
            result = databaseService;
        } else {
            throw new IllegalArgumentException(String.format("Invalid name %s", name));
        }
        return result;
    }

    /**
     * Calls the right method of the event bus for the given qualified name.
     * Posts the pre-update event appropriate to the name's level; {@code dto}
     * may be null for partition updates.
     *
     * @param name                  name
     * @param metacatRequestContext context
     * @param dto                   dto
     */
    public void postPreUpdateEvent(
        final QualifiedName name,
        final MetacatRequestContext metacatRequestContext,
        final BaseDto dto
    ) {
        if (name.isPartitionDefinition()) {
            final PartitionsSaveRequestDto partitionsSaveRequestDto = new PartitionsSaveRequestDto();
            if (dto != null) {
                partitionsSaveRequestDto.setPartitions(ImmutableList.of((PartitionDto) dto));
            }
            this.eventBus.post(
                new MetacatSaveTablePartitionPreEvent(name, metacatRequestContext, this, partitionsSaveRequestDto)
            );
        } else if (name.isViewDefinition()) {
            this.eventBus.post(
                new MetacatUpdateMViewPreEvent(name, metacatRequestContext, this, (TableDto) dto)
            );
        } else if (name.isTableDefinition()) {
            // The pre-update table event carries the same dto as both "old" and "new" table.
            this.eventBus.post(
                new MetacatUpdateTablePreEvent(name, metacatRequestContext, this, (TableDto) dto, (TableDto) dto)
            );
        } else if (name.isDatabaseDefinition()) {
            eventBus.post(new MetacatUpdateDatabasePreEvent(name, metacatRequestContext, this));
        } else {
            throw new IllegalArgumentException(String.format("Invalid name %s", name));
        }
    }

    /**
     * Calls the right method of the event bus for the given qualified name.
     * Posts the post-update event appropriate to the name's level.
     *
     * @param name                  name
     * @param metacatRequestContext context
     * @param oldDTo                dto
     * @param currentDto            dto
     */
    public void postPostUpdateEvent(
        final QualifiedName name,
        final MetacatRequestContext metacatRequestContext,
        final BaseDto oldDTo,
        final BaseDto currentDto
    ) {
        if (name.isPartitionDefinition()) {
            final List<PartitionDto> dtos = Lists.newArrayList();
            if (currentDto != null) {
                dtos.add((PartitionDto) currentDto);
            }
            // This request neither added nor updated partitions
            final PartitionsSaveResponseDto partitionsSaveResponseDto = new PartitionsSaveResponseDto();
            this.eventBus.post(
                new MetacatSaveTablePartitionPostEvent(
                    name,
                    metacatRequestContext,
                    this,
                    dtos,
                    partitionsSaveResponseDto
                )
            );
        } else if (name.isViewDefinition()) {
            final MetacatUpdateMViewPostEvent event = new MetacatUpdateMViewPostEvent(
                name,
                metacatRequestContext,
                this,
                (TableDto) currentDto
            );
            this.eventBus.post(event);
        } else if (name.isTableDefinition()) {
            final MetacatUpdateTablePostEvent event = new MetacatUpdateTablePostEvent(
                name,
                metacatRequestContext,
                this,
                (TableDto) oldDTo,
                (TableDto) currentDto
            );
            this.eventBus.post(event);
        } else if (name.isDatabaseDefinition()) {
            // NOTE: oldDTo/currentDto are intentionally not carried on the database event.
            this.eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this));
        } else {
            throw new IllegalArgumentException(String.format("Invalid name %s", name));
        }
    }

    /**
     * Calls the right method of the event bus for the given qualified name.
     * Posts the pre-delete event appropriate to the name's level.
     *
     * @param name                  name
     * @param metacatRequestContext context
     */
    public void postPreDeleteEvent(
        final QualifiedName name,
        final MetacatRequestContext metacatRequestContext
    ) {
        if (name.isPartitionDefinition()) {
            final PartitionsSaveRequestDto partitionsSaveRequestDto = new PartitionsSaveRequestDto();
            partitionsSaveRequestDto.setPartitionIdsForDeletes(Lists.newArrayList(name.getPartitionName()));
            this.eventBus.post(
                new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, partitionsSaveRequestDto)
            );
        } else if (name.isViewDefinition()) {
            this.eventBus.post(
                new MetacatDeleteMViewPreEvent(name, metacatRequestContext, this)
            );
        } else if (name.isTableDefinition()) {
            this.eventBus.post(new MetacatDeleteTablePreEvent(name, metacatRequestContext, this));
        } else if (name.isDatabaseDefinition()) {
            // Only the name is known at this point; the dto is a shell carrying it.
            final DatabaseDto dto = new DatabaseDto();
            dto.setName(name);
            eventBus.post(new MetacatDeleteDatabasePreEvent(name, metacatRequestContext, this, dto));
        } else {
            throw new IllegalArgumentException(String.format("Invalid name %s", name));
        }
    }

    /**
     * Calls the right method of the event bus for the given qualified name.
     * Posts the post-delete event appropriate to the name's level.
     *
     * @param name                  name
     * @param metacatRequestContext context
     */
    public void postPostDeleteEvent(
        final QualifiedName name,
        final MetacatRequestContext metacatRequestContext
    ) {
        if (name.isPartitionDefinition()) {
            this.eventBus.post(
                new MetacatDeleteTablePartitionPostEvent(
                    name,
                    metacatRequestContext,
                    this,
                    Lists.newArrayList(PartitionDto.builder().name(name).build())
                )
            );
        } else if (name.isViewDefinition()) {
            final TableDto dto = new TableDto();
            dto.setName(name);
            this.eventBus.post(new MetacatDeleteMViewPostEvent(name, metacatRequestContext, this, dto));
        } else if (name.isTableDefinition()) {
            final TableDto dto = new TableDto();
            dto.setName(name);
            this.eventBus.post(new MetacatDeleteTablePostEvent(name, metacatRequestContext, this, dto,
                false));
        } else if (name.isDatabaseDefinition()) {
            // NOTE(review): this branch posts an *update* database event after a delete,
            // unlike the table/view branches which post delete events. Confirm whether a
            // delete-database post event exists and should be used here instead.
            this.eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this));
        } else {
            throw new IllegalArgumentException(String.format("Invalid name %s", name));
        }
    }

    /**
     * check if the table is an Iceberg Table.
     *
     * @param tableDto table dto
     * @return true for iceberg table
     */
    public static boolean isIcebergTable(final TableDto tableDto) {
        return tableDto.getMetadata() != null
            && tableDto.getMetadata().containsKey(PARAM_TABLE_TYPE)
            && ICEBERG_TABLE_TYPE
            .equalsIgnoreCase(tableDto.getMetadata().get(PARAM_TABLE_TYPE));
    }
}
| 2,119 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/MetacatThriftService.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.thrift.CatalogThriftService;
import com.netflix.metacat.thrift.CatalogThriftServiceFactory;
import javax.inject.Inject;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Metacat thrift service. Creates and manages the lifecycle of one thrift
 * service per catalog that requested a thrift interface.
 *
 * @author zhenl
 * @since 1.1.0
 */
public class MetacatThriftService {
    private final ConnectorManager connectorManager;
    private final CatalogThriftServiceFactory thriftServiceFactory;

    /**
     * Constructor.
     *
     * @param catalogThriftServiceFactory factory
     * @param connectorManager            connector manager
     */
    @Inject
    public MetacatThriftService(final CatalogThriftServiceFactory catalogThriftServiceFactory,
                                final ConnectorManager connectorManager) {
        this.thriftServiceFactory = catalogThriftServiceFactory;
        this.connectorManager = connectorManager;
    }

    /**
     * Builds a thrift service for every catalog whose config requests a thrift interface.
     *
     * @return thrift services for all thrift-enabled catalogs
     */
    public List<CatalogThriftService> getCatalogThriftServices() {
        return connectorManager.getCatalogConfigs()
            .stream()
            .filter(MetacatCatalogConfig::isThriftInterfaceRequested)
            .map(config -> thriftServiceFactory.create(config.getCatalogName(), config.getThriftPort()))
            .collect(Collectors.toList());
    }

    /**
     * Start.
     *
     * @throws Exception error
     */
    public void start() throws Exception {
        for (final CatalogThriftService thriftService : getCatalogThriftServices()) {
            thriftService.start();
        }
    }

    /**
     * Stop.
     *
     * @throws Exception error
     */
    public void stop() throws Exception {
        for (final CatalogThriftService thriftService : getCatalogThriftServices()) {
            thriftService.stop();
        }
    }
}
| 2,120 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/MetacatService.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.BaseDto;
/**
 * Base service interface for all entities like catalog, database, table, view and partition.
 *
 * @author amajumdar,zhenl
 *
 * @param <T> Resource entity type.
 */
public interface MetacatService<T extends BaseDto> {
    /**
     * Creates the object.
     *
     * @param name qualified name of the object
     * @param dto  object metadata
     * @return created object
     */
    T create(QualifiedName name, T dto);

    /**
     * Updates the object.
     *
     * @param name qualified name of the object
     * @param dto  object dto
     */
    void update(QualifiedName name, T dto);

    /**
     * Updates the object and return the updated object.
     *
     * @param name qualified name of the object
     * @param dto  object dto
     * @return updated object
     */
    T updateAndReturn(QualifiedName name, T dto);

    /**
     * Deletes the object with the given name.
     *
     * @param name qualified name of the object to be deleted
     */
    void delete(QualifiedName name);

    /**
     * Returns the object with the given name.
     *
     * @param name qualified name of the object
     * @return Returns the object with the given name
     */
    T get(QualifiedName name);

    /**
     * Returns true, if the object exists.
     *
     * @param name qualified name of the object
     * @return boolean
     */
    boolean exists(QualifiedName name);
}
| 2,121 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogTraversal.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.google.common.base.Functions;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.spectator.api.Registry;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.joda.time.Instant;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* This class does a refresh of all the metadata entities from original data sources to elastic search.
*
* @author amajumdar
*/
@Slf4j
public class CatalogTraversal {
private static final Predicate<Object> NOT_NULL = Objects::nonNull;
private static AtomicBoolean isTraversalAlreadyRunning = new AtomicBoolean(false);
private final CatalogTraversalServiceHelper catalogTraversalServiceHelper;
private final List<CatalogTraversalAction> actions;
private final Config config;
private Registry registry;
// Traversal state
private Context context;
// Fixed thread pool
private ListeningExecutorService service;
private ListeningExecutorService actionService;
private ExecutorService defaultService;
    /**
     * Constructor. Actions are registered afterwards via {@link #addActions(List)};
     * the traversal starts with an empty action list.
     *
     * @param config System config
     * @param catalogTraversalServiceHelper Catalog service helper
     * @param registry registry of spectator
     */
    public CatalogTraversal(
        @Nonnull @NonNull final Config config,
        @Nonnull @NonNull final CatalogTraversalServiceHelper catalogTraversalServiceHelper,
        @Nonnull @NonNull final Registry registry
    ) {
        this.config = config;
        this.actions = Lists.newArrayList();
        this.catalogTraversalServiceHelper = catalogTraversalServiceHelper;
        this.registry = registry;
    }
private static ExecutorService newFixedThreadPool(
final int nThreads,
final String threadFactoryName,
final int queueSize
) {
return new ThreadPoolExecutor(nThreads, nThreads,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(queueSize),
new ThreadFactoryBuilder()
.setNameFormat(threadFactoryName)
.build(),
(r, executor) -> {
// this will block if the queue is full
try {
executor.getQueue().put(r);
} catch (InterruptedException e) {
throw Throwables.propagate(e);
}
});
}
    /**
     * Adds the action handlers. Actions are invoked during {@code _process} and the
     * list is cleared at the end of each traversal run.
     *
     * @param actionHandlers list of action handlers
     */
    public void addActions(final List<CatalogTraversalAction> actionHandlers) {
        this.actions.addAll(actionHandlers);
    }
    /**
     * Does a sweep across all catalogs (as reported by the traversal service helper)
     * to refresh the same data in elastic search.
     */
    public void process() {
        processCatalogs(catalogTraversalServiceHelper.getCatalogNames());
    }
    /**
     * Does a sweep across given catalogs to refresh the same data in elastic search.
     *
     * @param catalogNames catalog names
     */
    public void processCatalogs(final List<String> catalogNames) {
        final List<QualifiedName> qNames = catalogNames.stream()
            .map(QualifiedName::ofCatalog).collect(Collectors.toList());
        // queueSize=1000 bounds the traversal work queues created in _process.
        _process(qNames, () -> _processCatalogs(catalogNames), "processCatalogs", true, 1000);
    }
@SuppressWarnings("checkstyle:methodname")
private void _process(final List<QualifiedName> qNames, final Supplier<ListenableFuture<Void>> supplier,
final String requestName, final boolean delete, final int queueSize) {
if (isTraversalAlreadyRunning.compareAndSet(false, true)) {
final long start = registry.clock().wallTime();
try {
log.info("Start Traversal: Full catalog traversal. Processing {} ...", qNames);
final MetacatRequestContext requestContext = MetacatRequestContext.builder()
.userName("admin")
.clientAppName("catalogTraversal")
.apiUri("catalogTraversal")
.scheme("internal")
.build();
MetacatContextManager.setContext(requestContext);
final Instant startInstant = Instant.now();
context = new Context(startInstant.toString(), startInstant, qNames,
config.getElasticSearchRefreshExcludeQualifiedNames());
service = MoreExecutors
.listeningDecorator(newFixedThreadPool(10, "catalog-traversal-%d", queueSize));
actionService = MoreExecutors
.listeningDecorator(newFixedThreadPool(5, "catalog-traversal-action-service-%d", queueSize));
defaultService = Executors.newSingleThreadExecutor();
actions.forEach(a -> a.init(context));
supplier.get().get(24, TimeUnit.HOURS);
actions.forEach(a -> a.done(context));
log.info("End Traversal: Full catalog traversal");
} catch (Exception e) {
log.error("Traversal: Full catalog traversal failed", e);
registry.counter(registry.createId(Metrics.CounterCatalogTraversal.getMetricName())
.withTags(Metrics.tagStatusFailureMap)).increment();
} finally {
try {
shutdown(service);
shutdown(defaultService);
} finally {
isTraversalAlreadyRunning.set(false);
final long duration = registry.clock().wallTime() - start;
this.registry.timer(Metrics.TimerCatalogTraversal.getMetricName()
+ "." + requestName).record(duration, TimeUnit.MILLISECONDS);
log.info("### Time taken to complete {} is {} ms", requestName, duration);
}
actions.clear();
}
} else {
log.info("Traversal: Full catalog traversal is already running.");
registry.counter(registry.createId(Metrics.CounterCatalogTraversalAlreadyRunning.getMetricName()))
.increment();
}
}
private void shutdown(@Nullable final ExecutorService executorService) {
if (executorService != null) {
executorService.shutdown();
try {
// Wait a while for existing tasks to terminate
if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
executorService.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
log.warn("Thread pool for metacat traversal did not terminate");
}
}
} catch (InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executorService.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
}
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processCatalogs(final List<String> catalogNames) {
log.info("Start: Full traversal of catalogs: {}", catalogNames);
final List<List<String>> subCatalogNamesList = Lists.partition(catalogNames, 5);
final List<ListenableFuture<Void>> futures =
subCatalogNamesList.stream().map(this::_processSubCatalogList).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(futures), Functions.constant(null), defaultService);
}
    /**
     * Traverses one batch of catalogs: fetches each catalog dto in parallel on the worker pool,
     * then applies the catalogs to the registered actions and recursively processes each
     * catalog's databases. A catalog that fails to load is counted and skipped (its future
     * yields null, filtered out downstream).
     *
     * @param catalogNames batch of catalog names to traverse
     * @return future completing when the batch and all its databases have been processed
     */
    @SuppressWarnings("checkstyle:methodname")
    private ListenableFuture<Void> _processSubCatalogList(final List<String> catalogNames) {
        log.info("Start: Full traversal of catalogs: {}", catalogNames);
        final List<ListenableFuture<CatalogDto>> getCatalogFutures = catalogNames.stream()
            .map(catalogName -> service.submit(() -> {
                CatalogDto result = null;
                try {
                    result = catalogTraversalServiceHelper.getCatalog(catalogName);
                } catch (Exception e) {
                    // Best-effort: count the failure and continue with the other catalogs.
                    log.error("Traversal: Failed to retrieve catalog: {}", catalogName);
                    registry.counter(
                        registry.createId(Metrics.CounterCatalogTraversalCatalogReadFailed.getMetricName())
                            .withTag("catalogName", catalogName))
                        .increment();
                }
                return result;
            }))
            .collect(Collectors.toList());
        return Futures.transformAsync(Futures.successfulAsList(getCatalogFutures),
            input -> {
                // Apply the catalog dtos to the actions, and in parallel fan out into the
                // databases of each successfully-read catalog (nulls are load failures).
                final ListenableFuture<Void> processCatalogFuture = applyCatalogs(input);
                final List<ListenableFuture<Void>> processCatalogFutures = input.stream().filter(NOT_NULL).map(
                    catalogDto -> {
                        final List<QualifiedName> databaseNames = getDatabaseNamesToRefresh(catalogDto);
                        return _processDatabases(catalogDto, databaseNames);
                    }).filter(NOT_NULL).collect(Collectors.toList());
                processCatalogFutures.add(processCatalogFuture);
                return Futures.transform(Futures.successfulAsList(processCatalogFutures),
                    Functions.constant(null), defaultService);
            }, defaultService);
    }
private List<QualifiedName> getDatabaseNamesToRefresh(final CatalogDto catalogDto) {
final List<QualifiedName> result = catalogDto.getDatabases().stream()
.map(n -> QualifiedName.ofDatabase(catalogDto.getName().getCatalogName(), n))
.collect(Collectors.toList());
final List<QualifiedName> excludeQNames = context.getExcludeQNames();
if (excludeQNames != null && !excludeQNames.isEmpty()) {
result.removeAll(excludeQNames);
}
return result;
}
    /**
     * Processes the given databases of a catalog: fetches each database dto in parallel on the
     * worker pool, applies the databases to the registered actions, then traverses each
     * database's tables. A database that fails to load is counted and skipped.
     *
     * @param catalogDto    catalog dto the databases belong to
     * @param databaseNames qualified database names to process
     * @return future completing when all databases and their tables have been processed, or
     *         null when {@code databaseNames} is empty (callers filter with NOT_NULL)
     */
    @SuppressWarnings("checkstyle:methodname")
    private ListenableFuture<Void> _processDatabases(final CatalogDto catalogDto,
                                                     final List<QualifiedName> databaseNames) {
        ListenableFuture<Void> resultFuture = null;
        final QualifiedName catalogName = catalogDto.getName();
        log.info("Traversal: Full traversal of catalog {} for databases({}): {}",
            catalogName, databaseNames.size(), databaseNames);
        final List<ListenableFuture<DatabaseDto>> getDatabaseFutures = databaseNames.stream()
            .map(databaseName -> service.submit(() -> {
                DatabaseDto result = null;
                try {
                    result = catalogTraversalServiceHelper.getDatabase(catalogDto, databaseName);
                } catch (Exception e) {
                    // Best-effort: count the failure and continue with the other databases.
                    log.error("Traversal: Failed to retrieve database: {}", databaseName);
                    registry.counter(
                        registry.createId(Metrics.CounterCatalogTraversalDatabaseReadFailed.getMetricName())
                            .withTags(databaseName.parts()))
                        .increment();
                }
                return result;
            }))
            .collect(Collectors.toList());
        if (getDatabaseFutures != null && !getDatabaseFutures.isEmpty()) {
            resultFuture = Futures.transformAsync(Futures.successfulAsList(getDatabaseFutures),
                input -> {
                    // Apply the database dtos to the actions and, in parallel, traverse the
                    // tables of each successfully-read database (nulls are load failures).
                    final ListenableFuture<Void> processDatabaseFuture = applyDatabases(catalogName, input);
                    final List<ListenableFuture<Void>> processDatabaseFutures = input.stream().filter(NOT_NULL)
                        .map(databaseDto -> {
                            final List<QualifiedName> tableNames = databaseDto.getTables().stream()
                                .map(s -> QualifiedName.ofTable(databaseDto.getName().getCatalogName(),
                                    databaseDto.getName().getDatabaseName(), s))
                                .collect(Collectors.toList());
                            log.info("Traversal: Full traversal of database {} for tables({}): {}",
                                databaseDto.getName(),
                                databaseDto.getTables().size(), databaseDto.getTables());
                            return processTables(databaseDto, tableNames);
                        }).filter(NOT_NULL).collect(Collectors.toList());
                    processDatabaseFutures.add(processDatabaseFuture);
                    return Futures.transform(Futures.successfulAsList(processDatabaseFutures),
                        Functions.constant(null), defaultService);
                }, defaultService);
        }
        return resultFuture;
    }
/**
* Apply all catalogs to all registered actions.
*
* @param dtos catalog dtos
* @return future
*/
private ListenableFuture<Void> applyCatalogs(final List<CatalogDto> dtos) {
final List<ListenableFuture<Void>> actionFutures = actions.stream()
.map(a -> actionService.submit((Callable<Void>) () -> {
a.applyCatalogs(context, dtos);
return null;
})).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(actionFutures),
Functions.constant(null), defaultService);
}
/**
* Apply all databases to all registered actions.
*
* @param name catalog name
* @param dtos database dtos
* @return future
*/
private ListenableFuture<Void> applyDatabases(final QualifiedName name, final List<DatabaseDto> dtos) {
log.info("Traversal: Apply databases for catalog: {}", name);
final List<ListenableFuture<Void>> actionFutures = actions.stream()
.map(a -> actionService.submit((Callable<Void>) () -> {
a.applyDatabases(context, dtos);
return null;
})).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(actionFutures),
Functions.constant(null), defaultService);
}
/**
* Apply all tables to all registered actions.
*
* @param name database Name
* @param dtos table dtos
* @return future
*/
private ListenableFuture<Void> applyTables(final QualifiedName name, final List<Optional<TableDto>> dtos) {
log.info("Traversal: Apply tables for database: {}", name);
final List<ListenableFuture<Void>> actionFutures = actions.stream()
.map(a -> actionService.submit((Callable<Void>) () -> {
a.applyTables(context, dtos);
return null;
})).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(actionFutures),
Functions.constant(null), defaultService);
}
/**
* Process the list of tables in batches.
*
* @param databaseDto database dto
* @param tableNames table names
* @return A future containing the tasks
*/
private ListenableFuture<Void> processTables(final DatabaseDto databaseDto,
final List<QualifiedName> tableNames) {
final List<List<QualifiedName>> tableNamesBatches = Lists.partition(tableNames, 500);
final List<ListenableFuture<Void>> processTablesBatchFutures = tableNamesBatches.stream().map(
subTableNames -> _processTables(databaseDto, subTableNames)).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(processTablesBatchFutures),
Functions.constant(null), defaultService);
}
    /**
     * Processes one batch of tables: fetches each table dto in parallel on the worker pool and
     * applies the results to the registered actions. A table that fails to load is counted and
     * yields a null element.
     *
     * @param databaseDto database dto the tables belong to
     * @param tableNames  batch of qualified table names
     * @return future completing once the tables have been applied to all actions
     */
    @SuppressWarnings("checkstyle:methodname")
    private ListenableFuture<Void> _processTables(final DatabaseDto databaseDto,
                                                  final List<QualifiedName> tableNames) {
        final QualifiedName databaseName = databaseDto.getName();
        final List<ListenableFuture<Optional<TableDto>>> getTableFutures = tableNames.stream()
            .map(tableName -> service.submit(() -> {
                // NOTE(review): on read failure this stays null (not Optional.empty()); the
                // applied list can therefore contain nulls — presumably actions tolerate
                // that, since successfulAsList also yields nulls. Confirm before changing.
                Optional<TableDto> result = null;
                try {
                    result = catalogTraversalServiceHelper.getTable(databaseDto, tableName);
                } catch (Exception e) {
                    log.error("Traversal: Failed to retrieve table: {}", tableName);
                    registry.counter(
                        registry.createId(Metrics.CounterCatalogTraversalTableReadFailed.getMetricName())
                            .withTags(tableName.parts()))
                        .increment();
                }
                return result;
            }))
            .collect(Collectors.toList());
        return Futures.transformAsync(Futures.successfulAsList(getTableFutures),
            input -> applyTables(databaseName, input), defaultService);
    }
    /**
     * Traversal context shared by all registered actions during a single traversal run.
     */
    @Data
    @AllArgsConstructor
    public static class Context {
        // Identifier of this run (the start instant rendered as a string).
        private String runId;
        // Instant at which the traversal started.
        private Instant startInstant;
        // Qualified names requested for this traversal.
        private List<QualifiedName> qNames;
        // Qualified names excluded from the traversal (from configuration).
        private List<QualifiedName> excludeQNames;
    }
}
| 2,122 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/MViewService.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.TableDto;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Optional;
/**
 * View service. Manages materialized views (mviews) created from an origin table, including
 * creation/deletion, partition management and metadata persistence.
 */
public interface MViewService extends MetacatService<TableDto> {
    /**
     * Create the view and returns the newly created view.
     *
     * @param name name of the origin table
     * @return view
     */
    TableDto create(QualifiedName name);
    /**
     * Create the view and returns the newly created view, optionally snapshotting a filtered
     * set of the origin table's partitions into it.
     *
     * @param name name of the origin table
     * @param snapshot To snapshot a list of partitions of the table to this view.
     * @param filter Filter expression string to use
     * @return view
     */
    TableDto createAndSnapshotPartitions(QualifiedName name, boolean snapshot, @Nullable String filter);
    /**
     * Deletes the view and returns the deleted view.
     *
     * @param name name of the view to be deleted
     * @return deleted view
     */
    TableDto deleteAndReturn(QualifiedName name);
    /**
     * Get the view for the given name.
     *
     * @param name name
     * @param parameters getTable parameters
     * @return view, or empty if the view does not exist
     */
    Optional<TableDto> getOpt(QualifiedName name, GetTableServiceParameters parameters);
    /**
     * Copy partitions from the given table name.
     *
     * @param name table name
     * @param filter filter expression selecting the partitions to copy
     */
    void snapshotPartitions(QualifiedName name, String filter);
    /**
     * Saves the list of partitions to the given view.
     *
     * @param name name
     * @param partitionsSaveRequestDto request dto containing the partitions to be added and deleted
     * @param merge if true, merges the given partitions with the view's existing partitions
     * @return no. of partitions added and updated.
     */
    PartitionsSaveResponseDto savePartitions(QualifiedName name,
                                             PartitionsSaveRequestDto partitionsSaveRequestDto, boolean merge);
    /**
     * Deletes the list of partitions with the given ids <code>partitionIds</code>.
     *
     * @param name view name
     * @param partitionIds partition names
     */
    void deletePartitions(QualifiedName name, List<String> partitionIds);
    /**
     * Returns the list of partitions.
     *
     * @param name view name
     * @param sort sort info
     * @param pageable pagination info
     * @param includeUserMetadata if true, includes the user metadata
     * @param getPartitionsRequestDto get partitions request
     * @return list of partitions
     */
    List<PartitionDto> listPartitions(
        QualifiedName name,
        @Nullable Sort sort,
        @Nullable Pageable pageable,
        boolean includeUserMetadata,
        @Nullable GetPartitionsRequestDto getPartitionsRequestDto);
    /**
     * Returns a list of partition names.
     *
     * @param name view name
     * @param sort sort info
     * @param pageable pagination info
     * @param getPartitionsRequestDto get partition request dto
     * @return list of partition names
     */
    List<String> getPartitionKeys(
        QualifiedName name,
        @Nullable Sort sort,
        @Nullable Pageable pageable,
        @Nullable GetPartitionsRequestDto getPartitionsRequestDto
    );
    /**
     * Returns a list of partition uris.
     *
     * @param name view name
     * @param sort sort info
     * @param pageable pagination info
     * @param getPartitionsRequestDto get partition request dto
     * @return list of partition uris
     */
    List<String> getPartitionUris(
        QualifiedName name,
        @Nullable Sort sort,
        @Nullable Pageable pageable,
        @Nullable GetPartitionsRequestDto getPartitionsRequestDto);
    /**
     * Partition count for the given view name.
     *
     * @param name view name
     * @return no. of partitions
     */
    Integer partitionCount(QualifiedName name);
    /**
     * Returns the list of view names for the given name.
     *
     * @param qualifiedName name
     * @return list of view names
     */
    List<NameDateDto> list(QualifiedName qualifiedName);
    /**
     * Save metadata for the view.
     *
     * @param name view name
     * @param definitionMetadata definition metadata
     * @param dataMetadata data metadata
     */
    void saveMetadata(QualifiedName name, ObjectNode definitionMetadata, ObjectNode dataMetadata);
    /**
     * Rename view.
     *
     * @param name view name
     * @param newViewName new view name
     */
    void rename(QualifiedName name, QualifiedName newViewName);
}
| 2,123 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/GetTableNamesServiceParameters.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import lombok.Builder;
import lombok.Value;
/**
 * GetTableNames Parameters. Immutable value holder for the filter expression and result
 * limit used when listing table names.
 *
 * @author amajumdar
 * @since 1.3.0
 */
@Value
@Builder
public class GetTableNamesServiceParameters {
    // Filter expression applied when listing table names.
    private final String filter;
    // Maximum number of table names to return; presumably null means no limit — TODO confirm.
    private final Integer limit;
}
| 2,124 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogTraversalServiceHelper.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
 * Service helper class for catalog traversal. Thin read-only facade over the catalog,
 * database and table services used by the traversal to fetch dtos.
 */
public class CatalogTraversalServiceHelper {
    protected final CatalogService catalogService;
    protected final TableService tableService;
    protected final DatabaseService databaseService;
    /**
     * Constructor.
     *
     * @param catalogService Catalog service
     * @param databaseService Database service
     * @param tableService Table service
     */
    public CatalogTraversalServiceHelper(
        @Nonnull @NonNull final CatalogService catalogService,
        @Nonnull @NonNull final DatabaseService databaseService,
        @Nonnull @NonNull final TableService tableService
    ) {
        this.catalogService = catalogService;
        this.databaseService = databaseService;
        this.tableService = tableService;
    }
    /**
     * Returns the list of catalog names.
     * @return list of catalog names
     */
    public List<String> getCatalogNames() {
        return catalogService.getCatalogNames().stream().map(CatalogMappingDto::getCatalogName).collect(
            Collectors.toList());
    }
    /**
     * Returns the catalog for the given <code>name</code>.
     * @param catalogName catalog name
     * @return catalog
     */
    public CatalogDto getCatalog(final String catalogName) {
        return catalogService.get(QualifiedName.ofCatalog(catalogName));
    }
    /**
     * Returns the database for the given <code>databaseName</code>, including its table names
     * and user metadata.
     * @param catalogDto catalog dto (currently unused; retained for subclass overrides — TODO confirm)
     * @param databaseName database name
     * @return database
     */
    public DatabaseDto getDatabase(final CatalogDto catalogDto, final QualifiedName databaseName) {
        return databaseService.get(databaseName,
            GetDatabaseServiceParameters.builder()
                .disableOnReadMetadataIntercetor(false)
                .includeTableNames(true)
                .includeUserMetadata(true)
                .build());
    }
    /**
     * Returns the table for the given <code>tableName</code>, including its info, definition
     * metadata and data metadata.
     * @param databaseDto database dto (currently unused; retained for subclass overrides — TODO confirm)
     * @param tableName table name
     * @return table dto
     */
    public Optional<TableDto> getTable(final DatabaseDto databaseDto, final QualifiedName tableName) {
        return tableService.get(tableName, GetTableServiceParameters.builder()
            .disableOnReadMetadataIntercetor(false)
            .includeInfo(true)
            .includeDefinitionMetadata(true)
            .includeDataMetadata(true)
            .build());
    }
}
| 2,125 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/DatabaseService.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
/**
 * Database service.
 */
public interface DatabaseService extends MetacatService<DatabaseDto> {
    /**
     * Gets the database with the given name.
     * @param name qualified name of the database
     * @param getDatabaseServiceParameters get database request parameters
     * @return database info with the given name
     */
    DatabaseDto get(QualifiedName name, GetDatabaseServiceParameters getDatabaseServiceParameters);
}
| 2,126 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/MetadataService.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.BaseDto;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.spectator.api.Registry;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.joda.time.DateTime;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Metadata Service. This class includes any common services for the user metadata.
*
* @author amajumdar
*/
@Slf4j
@Getter
public class MetadataService {
    private final Config config;
    private final TableService tableService;
    private final PartitionService partitionService;
    private final UserMetadataService userMetadataService;
    private final TagService tagService;
    private final MetacatServiceHelper helper;
    private final Registry registry;
    /**
     * Constructor.
     *
     * @param config configuration
     * @param tableService table service
     * @param partitionService partition service
     * @param userMetadataService user metadata service
     * @param tagService tag service
     * @param helper service helper
     * @param registry registry
     */
    public MetadataService(final Config config,
                           final TableService tableService,
                           final PartitionService partitionService,
                           final UserMetadataService userMetadataService,
                           final TagService tagService,
                           final MetacatServiceHelper helper,
                           final Registry registry) {
        this.config = config;
        this.tableService = tableService;
        this.partitionService = partitionService;
        this.userMetadataService = userMetadataService;
        this.tagService = tagService;
        this.helper = helper;
        this.registry = registry;
    }
    /**
     * Deletes all the data metadata marked for deletion. Pages through uris marked deleted
     * before the configured lifetime, and deletes a uri's data metadata only when no partition
     * or table still references the uri. Failures are counted and logged, not rethrown.
     */
    public void cleanUpDeletedDataMetadata() {
        // Get the data metadata that were marked deleted a number of days back
        // Check if the uri is being used
        // If uri is not used then delete the entry from data_metadata
        log.info("Start deleting data metadata");
        try {
            final DateTime priorTo = DateTime.now().minusDays(config.getDataMetadataDeleteMarkerLifetimeInDays());
            final int limit = 100000;
            final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
            while (true) {
                final List<String> urisToDelete =
                    userMetadataService.getDeletedDataMetadataUris(priorTo.toDate(), 0, limit);
                log.info("Count of deleted marked data metadata: {}", urisToDelete.size());
                if (urisToDelete.size() > 0) {
                    // Expand non-partition uris (those without '=') to include descendant uris.
                    final List<String> uris = urisToDelete.parallelStream().filter(uri -> !uri.contains("="))
                        .map(userMetadataService::getDescendantDataUris)
                        .flatMap(Collection::stream).collect(Collectors.toList());
                    uris.addAll(urisToDelete);
                    log.info("Count of deleted marked data metadata (including descendants) : {}", uris.size());
                    final List<List<String>> subListsUris = Lists.partition(uris, 1000);
                    subListsUris.parallelStream().forEach(subUris -> {
                        // Propagate the request context onto the parallel-stream worker threads.
                        MetacatContextManager.setContext(metacatRequestContext);
                        final Map<String, List<QualifiedName>> uriPartitionQualifiedNames = partitionService
                            .getQualifiedNames(subUris, false);
                        final Map<String, List<QualifiedName>> uriTableQualifiedNames = tableService
                            .getQualifiedNames(subUris, false);
                        // Merge the partition and table lookups into one uri -> names map.
                        final Map<String, List<QualifiedName>> uriQualifiedNames =
                            Stream.concat(uriPartitionQualifiedNames.entrySet().stream(),
                                uriTableQualifiedNames.entrySet().stream())
                                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a, b) -> {
                                    final List<QualifiedName> subNames = Lists.newArrayList(a);
                                    subNames.addAll(b);
                                    return subNames;
                                }));
                        // Only uris that nothing references anymore are safe to delete.
                        final List<String> canDeleteMetadataForUris = subUris.parallelStream()
                            .filter(s -> !Strings.isNullOrEmpty(s))
                            .filter(s -> uriQualifiedNames.get(s) == null || uriQualifiedNames.get(s).size() == 0)
                            .collect(Collectors.toList());
                        log.info("Start deleting data metadata: {}", canDeleteMetadataForUris.size());
                        userMetadataService.deleteDataMetadata(canDeleteMetadataForUris);
                        // Clear the delete markers for the whole sublist either way.
                        userMetadataService.deleteDataMetadataDeletes(subUris);
                        MetacatContextManager.removeContext();
                    });
                }
                if (urisToDelete.size() < limit) {
                    break;
                }
            }
        } catch (Exception e) {
            registry.counter(Metrics.CounterDeleteMetaData.getMetricName()).increment();
            log.warn("Failed deleting data metadata", e);
        }
        log.info("End deleting data metadata");
    }
    /**
     * Deletes definition metadata of tables/views/partitions that have been deleted already.
     * Pages through all definition metadata; the offset advances by the number of rows kept
     * (limit minus deletes) so deleted rows are not skipped over.
     */
    public void cleanUpObsoleteDefinitionMetadata() {
        log.info("Start deleting obsolete definition metadata");
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        List<DefinitionMetadataDto> dtos = null;
        int offset = 0;
        final int limit = 10000;
        int totalDeletes = 0;
        while (offset == 0 || dtos.size() == limit) {
            dtos = userMetadataService.searchDefinitionMetadata(null, null, null, null,
                "id", null, offset, limit);
            final long deletes = dtos.parallelStream().map(dto -> {
                try {
                    return deleteDefinitionMetadata(dto.getName(), false, metacatRequestContext);
                } catch (Exception e) {
                    // Best-effort: keep scanning even if one name fails.
                    log.warn("Failed deleting obsolete definition metadata for table {}", dto.getName(), e);
                    return false;
                }
            })
                .filter(b -> b).count();
            totalDeletes += deletes;
            offset += limit - deletes;
        }
        log.info("End deleting obsolete definition metadata. Deleted {} number of definition metadatas", totalDeletes);
    }
    /**
     * Deletes definition metadata for the given <code>name</code>, firing the matching
     * pre/post update-or-delete events around the deletion. Catalogs named "rds" are skipped.
     *
     * @param name qualified name
     * @param force If true, deletes the metadata without checking if database/table/partition exists
     * @param metacatRequestContext request context
     * @return true if deleted
     */
    public boolean deleteDefinitionMetadata(final QualifiedName name, final boolean force,
                                            final MetacatRequestContext metacatRequestContext) {
        try {
            final MetacatService service = this.helper.getService(name);
            BaseDto dto = null;
            if (!force) {
                try {
                    dto = service.get(name);
                } catch (final NotFoundException ignored) {
                    // Entity is gone: dto stays null, so its metadata is eligible for deletion.
                }
            }
            if ((force || dto == null) && !"rds".equalsIgnoreCase(name.getCatalogName())) {
                // If the entity still exists (forced), treat this as an update; else a delete.
                if (dto != null) {
                    this.helper.postPreUpdateEvent(name, metacatRequestContext, dto);
                } else {
                    this.helper.postPreDeleteEvent(name, metacatRequestContext);
                }
                this.userMetadataService.deleteDefinitionMetadata(Lists.newArrayList(name));
                this.tagService.delete(name, false);
                log.info("Deleted definition metadata for {}", name);
                if (dto != null) {
                    final BaseDto newDto = service.get(name);
                    this.helper.postPostUpdateEvent(name, metacatRequestContext, dto, newDto);
                } else {
                    this.helper.postPostDeleteEvent(name, metacatRequestContext);
                }
                return true;
            }
        } catch (Exception e) {
            log.warn("Failed deleting definition metadata for name {}.", name, e);
            throw e;
        }
        return false;
    }
    /**
     * Deletes tags for deleted tables. Scans all tagged names and removes tags for table
     * names whose table no longer exists.
     */
    public void cleanUpObsoleteTags() {
        log.info("Start deleting obsolete tags");
        final List<QualifiedName> names = tagService.list(null, null, null, null, null,
            null);
        names.forEach(name -> {
            if (!name.isPartitionDefinition() && !name.isViewDefinition() && name.isTableDefinition()
                && !tableService.exists(name)) {
                this.tagService.delete(name, false);
                log.info("Deleted obsolete tag for {}", name);
            }
        });
        log.info("End deleting obsolete tags");
    }
}
| 2,127 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/MViewServiceEventHandler.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.events.AsyncListener;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;
import java.util.List;
/**
* Event handler for view changes based on table changes.
*
* @author amajumdar
*/
@Slf4j
@Component
@AsyncListener
public class MViewServiceEventHandler {
    private final Config config;
    private final MViewService mViewService;
    private final UserMetadataService userMetadataService;
    /**
     * Default constructor.
     * @param config server configurations
     * @param mViewService view service
     * @param userMetadataService user metadata service
     */
    @Autowired
    public MViewServiceEventHandler(final Config config,
                                    final MViewService mViewService,
                                    final UserMetadataService userMetadataService) {
        this.config = config;
        this.mViewService = mViewService;
        this.userMetadataService = userMetadataService;
    }
    /**
     * Subscriber. On table delete (when cascading is enabled and the deleted entity is not
     * itself an mview), best-effort deletes the table's views and the definition metadata of
     * its descendants (e.g. partitions).
     *
     * @param event event
     */
    @EventListener
    public void metacatDeleteTablePostEventHandler(final MetacatDeleteTablePostEvent event) {
        if (config.canCascadeViewsMetadataOnTableDelete() && !event.isMView()) {
            final QualifiedName name = event.getTable().getName();
            try {
                // delete views associated with this table
                final List<NameDateDto> viewNames = mViewService.list(name);
                viewNames.forEach(viewName -> mViewService.deleteAndReturn(viewName.getName()));
            } catch (Exception e) {
                // Fix: pass the exception to the logger so the failure cause is not discarded.
                log.warn("Failed cleaning mviews after deleting table {}", name, e);
            }
            // delete table partitions metadata
            try {
                final List<QualifiedName> names = userMetadataService.getDescendantDefinitionNames(name);
                if (names != null && !names.isEmpty()) {
                    userMetadataService.deleteDefinitionMetadata(names);
                }
            } catch (Exception e) {
                // Fix: pass the exception to the logger so the failure cause is not discarded.
                log.warn("Failed cleaning partition definition metadata after deleting table {}", name, e);
            }
        }
    }
    /**
     * Subscriber. On table rename (when the renamed entity is not itself an mview), renames
     * every view of the table so it points at the new table name.
     *
     * @param event event
     */
    @EventListener
    public void metacatRenameTablePostEventHandler(final MetacatRenameTablePostEvent event) {
        if (!event.isMView()) {
            final QualifiedName oldName = event.getOldTable().getName();
            final QualifiedName newName = event.getCurrentTable().getName();
            final List<NameDateDto> views = mViewService.list(oldName);
            if (views != null && !views.isEmpty()) {
                views.forEach(view -> {
                    // Rename stays within the same catalog/database; only the table part changes.
                    final QualifiedName newViewName = QualifiedName
                        .ofView(oldName.getCatalogName(), oldName.getDatabaseName(), newName.getTableName(),
                            view.getName().getViewName());
                    mViewService.rename(view.getName(), newViewName);
                });
            }
        }
    }
}
| 2,128 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogService.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.CreateCatalogDto;
import javax.annotation.Nonnull;
import java.util.List;
/**
 * Catalog service.
 *
 * <p>Read and update operations for the catalogs registered with Metacat.
 */
public interface CatalogService {
    /**
     * Gets the catalog using default lookup parameters.
     *
     * @param name Qualified name of the catalog
     * @return the information about the given catalog
     */
    @Nonnull
    CatalogDto get(QualifiedName name);
    /**
     * Gets the catalog. The returned dto carries database names, user metadata and/or
     * connector metadata depending on the flags set in the supplied parameters.
     *
     * @param name Qualified name of the catalog
     * @param getCatalogServiceParameters parameters controlling which details are included
     * @return the information about the given catalog
     */
    @Nonnull
    CatalogDto get(QualifiedName name, GetCatalogServiceParameters getCatalogServiceParameters);
    /**
     * List of registered catalogs.
     *
     * @return all of the registered catalogs
     */
    @Nonnull
    List<CatalogMappingDto> getCatalogNames();
    /**
     * Updates the catalog.
     *
     * @param name Qualified name of the catalog
     * @param createCatalogDto catalog definition to apply
     */
    void update(QualifiedName name, CreateCatalogDto createCatalogDto);
}
| 2,129 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/GetCatalogServiceParameters.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import lombok.Builder;
import lombok.Value;
/**
 * Get Catalog Parameters. Flags controlling how much detail a catalog lookup returns.
 *
 * @author amajumdar
 * @since 1.2.0
 */
@Value
@Builder
public class GetCatalogServiceParameters {
    // If true, include the catalog's database names in the response.
    private final boolean includeDatabaseNames;
    // If true, include user-defined metadata in the response.
    private final boolean includeUserMetadata;
    // If true, include metadata fetched from the underlying connector.
    private final boolean includeMetadataFromConnector;
}
| 2,130 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/GetDatabaseServiceParameters.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services;
import lombok.Builder;
import lombok.Value;
/**
 * Get Database Parameters. Flags controlling how much detail a database lookup returns.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Value
@Builder
public class GetDatabaseServiceParameters {
    // If true, skip the on-read metadata interceptor.
    // NOTE(review): field name is misspelled ("Intercetor") but is kept as-is because
    // Lombok derives the public getter/builder method names from it; renaming would
    // break existing callers.
    private final boolean disableOnReadMetadataIntercetor;
    // If true, include the database's table names in the response.
    private final boolean includeTableNames;
    // If true, include user-defined metadata in the response.
    private final boolean includeUserMetadata;
    // If true, include metadata fetched from the underlying connector.
    private final boolean includeMetadataFromConnector;
}
| 2,131 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/TableService.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Table service.
 *
 * <p>CRUD, rename, copy and URI-lookup operations for tables, on top of the
 * generic {@link MetacatService} contract.
 */
public interface TableService extends MetacatService<TableDto> {
    /**
     * Deletes the table. Returns the table metadata of the table deleted.
     *
     * @param name qualified name of the table to be deleted
     * @param isMView true if this table is created for a mview
     * @return Returns the deleted table
     */
    TableDto deleteAndReturn(QualifiedName name, boolean isMView);
    /**
     * Returns the table with the given name, or an empty Optional if it does not exist.
     *
     * @param name qualified name of the table
     * @param getTableServiceParameters get table parameters
     * @return Returns the table with the given name
     */
    Optional<TableDto> get(QualifiedName name, GetTableServiceParameters getTableServiceParameters);
    /**
     * Rename the table from <code>oldName</code> to <code>newName</code>.
     *
     * @param oldName old qualified name of the existing table
     * @param newName new qualified name of the table
     * @param isMView true, if the object is a view
     */
    void rename(QualifiedName oldName, QualifiedName newName, boolean isMView);
    /**
     * Copies the table metadata from source table <code>name</code> to target table <code>targetName</code>.
     *
     * @param name qualified name of the source table
     * @param targetName qualified name of the target table
     * @return Returns the copied table
     */
    TableDto copy(QualifiedName name, QualifiedName targetName);
    /**
     * Copies the table metadata from source table <code>tableDto</code> to target table
     * <code>targetName</code>.
     *
     * @param tableDto source table
     * @param targetName qualified name of the target table
     * @return Returns the copied table
     */
    TableDto copy(TableDto tableDto, QualifiedName targetName);
    /**
     * Saves the user metadata for the given table.
     *
     * @param name qualified name of the table
     * @param definitionMetadata user definition metadata json
     * @param dataMetadata user data metadata json
     */
    void saveMetadata(QualifiedName name, ObjectNode definitionMetadata, ObjectNode dataMetadata);
    /**
     * Returns a list of qualified names of tables that refer to the given <code>uri</code>.
     * If prefixSearch is true, the uri is treated as a prefix and no exact match is required.
     *
     * @param uri uri/location
     * @param prefixSearch if false, the method looks for an exact match for the uri
     * @return list of table names
     */
    List<QualifiedName> getQualifiedNames(String uri, boolean prefixSearch);
    /**
     * Returns a map from uri to the qualified names of tables that refer to that uri.
     * If prefixSearch is true, each uri is treated as a prefix and no exact match is required.
     *
     * @param uris uris/locations
     * @param prefixSearch if false, the method looks for an exact match for the uri
     * @return Map of list of table names
     */
    Map<String, List<QualifiedName>> getQualifiedNames(List<String> uris, boolean prefixSearch);
    /**
     * Returns a list of qualified names of tables that match the given filter.
     *
     * @param name catalog name
     * @param parameters parameters used to get the table names
     * @return list of table names
     */
    List<QualifiedName> getQualifiedNames(QualifiedName name, GetTableNamesServiceParameters parameters);
}
| 2,132 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/OwnerValidationService.java
|
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.NonNull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;
/**
 * Interface for validating table owner attribute.
 */
public interface OwnerValidationService {
    /**
     * Returns an ordered list of owners to be used for validation and owner assignment.
     * Since metacat owners in a request may come from a number of places (DTO, Request
     * context) this method centralizes that order.
     *
     * @param dto the input Table Dto
     * @return an ordered list of owner candidates
     */
    List<String> extractPotentialOwners(@NonNull TableDto dto);
    /**
     * Returns an ordered list of owner groups to be used for validation and owner
     * assignment. Since metacat owners in a request may come from a number of places
     * (DTO, Request context) this method centralizes that order.
     *
     * <p>The default implementation returns no candidates.
     *
     * @param dto the input Table Dto
     * @return an ordered list of owner-group candidates
     */
    default List<String> extractPotentialOwnerGroups(@NonNull TableDto dto) {
        return Collections.emptyList();
    }
    /**
     * Checks whether the given owner is valid against a registry.
     *
     * @param user the user
     * @return true if the owner is valid, else false
     */
    boolean isUserValid(@Nullable String user);
    /**
     * Checks whether the given owner group is valid against a registry.
     *
     * <p>The default implementation treats every group as valid.
     *
     * @param groupName the groupName
     * @return true if the owner group is valid, else false
     */
    default boolean isGroupValid(@Nullable String groupName) {
        return true;
    }
    /**
     * Enforces valid table owner attribute. Implementations are free to
     * handle it as needed - throw exceptions or ignore. The owner attribute
     * in the DTO may or may not be valid so implementations should check for validity
     * before enforcement.
     *
     * @param operationName the name of the metacat API, useful for logging
     * @param tableName the name of the table
     * @param tableDto the table dto containing the owner in the definition metadata field
     */
    void enforceOwnerValidation(@NonNull String operationName,
                                @NonNull QualifiedName tableName,
                                @NonNull TableDto tableDto);
}
| 2,133 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/PartitionService.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
/**
 * Partition service.
 *
 * <p>List, count, save and delete operations for table partitions, plus
 * URI-based partition lookups.
 */
public interface PartitionService extends MetacatService<PartitionDto> {
    /**
     * Returns the list of partitions for the given table.
     *
     * @param name table name
     * @param sort sort info
     * @param pageable pagination info
     * @param includeUserDefinitionMetadata if true, includes the definition metadata
     * @param includeUserDataMetadata if true, includes the data metadata
     * @param getPartitionsRequestDto optional filter/selection criteria
     * @return list of partitions
     */
    List<PartitionDto> list(
        QualifiedName name,
        @Nullable Sort sort,
        @Nullable Pageable pageable,
        boolean includeUserDefinitionMetadata,
        boolean includeUserDataMetadata,
        @Nullable GetPartitionsRequestDto getPartitionsRequestDto);
    /**
     * Partition count for the given table name.
     *
     * @param name table name
     * @return no. of partitions
     */
    Integer count(QualifiedName name);
    /**
     * Saves the list of partitions to the given table <code>name</code>. By default, if a
     * partition exists, it drops the partition before adding it. If
     * <code>alterIfExists</code> is set in the request, the existing partition is altered
     * instead.
     *
     * @param name table name
     * @param partitionsSaveRequestDto request dto containing the partitions to be added and deleted
     * @return no. of partitions added and updated.
     */
    PartitionsSaveResponseDto save(QualifiedName name, PartitionsSaveRequestDto partitionsSaveRequestDto);
    /**
     * Deletes the partitions with the given <code>partitionIds</code> for the given table name.
     *
     * @param name table name
     * @param partitionIds partition names
     */
    void delete(QualifiedName name, List<String> partitionIds);
    /**
     * Returns the qualified names of partitions that refer to the given uri.
     *
     * @param uri uri
     * @param prefixSearch if true, this method does a prefix search
     * @return list of names
     */
    List<QualifiedName> getQualifiedNames(String uri, boolean prefixSearch);
    /**
     * Returns a map of uri to qualified names.
     *
     * @param uris list of uris
     * @param prefixSearch if true, this method does a prefix search
     * @return map of uri to qualified names
     */
    Map<String, List<QualifiedName>> getQualifiedNames(List<String> uris, boolean prefixSearch);
    /**
     * Returns a list of partition names for the given table.
     *
     * @param name table name
     * @param sort sort info
     * @param pageable pagination info
     * @param getPartitionsRequestDto get partition request dto
     * @return list of partition names
     */
    List<String> getPartitionKeys(
        QualifiedName name,
        @Nullable Sort sort,
        @Nullable Pageable pageable,
        @Nullable GetPartitionsRequestDto getPartitionsRequestDto);
    /**
     * Returns a list of partition uris for the given table.
     *
     * @param name table name
     * @param sort sort info
     * @param pageable pagination info
     * @param getPartitionsRequestDto get partition request dto
     * @return list of partition uris
     */
    List<String> getPartitionUris(
        QualifiedName name,
        @Nullable Sort sort,
        @Nullable Pageable pageable,
        @Nullable GetPartitionsRequestDto getPartitionsRequestDto);
}
| 2,134 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes services.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,135 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogTraversalAction.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import java.util.List;
import java.util.Optional;
/**
 * Action interface that will be called on catalog traversal. All methods have
 * no-op defaults so implementations only override the traversal stages they care about.
 */
public interface CatalogTraversalAction {
    /**
     * Called once when the catalog traversal starts.
     *
     * @param context traversal context
     */
    default void init(CatalogTraversal.Context context) { }
    /**
     * Called when the catalog traversal processes a batch of catalogs.
     *
     * @param context traversal context
     * @param catalogs list of catalogs
     */
    default void applyCatalogs(CatalogTraversal.Context context, List<CatalogDto> catalogs) { }
    /**
     * Called when the catalog traversal processes a batch of databases.
     *
     * @param context traversal context
     * @param databases list of databases
     */
    default void applyDatabases(CatalogTraversal.Context context, List<DatabaseDto> databases) { }
    /**
     * Called when the catalog traversal processes a batch of tables. Each entry may be
     * empty when the table could not be fetched.
     *
     * @param context traversal context
     * @param tables list of tables
     */
    default void applyTables(CatalogTraversal.Context context, List<Optional<TableDto>> tables) { }
    /**
     * Called once when the catalog traversal ends.
     *
     * @param context traversal context
     */
    default void done(CatalogTraversal.Context context) { }
}
| 2,136 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/ConnectorTableServiceProxy.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.GetTableNamesServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import lombok.extern.slf4j.Slf4j;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.cache.annotation.Caching;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Handles calls to the connector table service, adding request-context plumbing and
 * (for reads) Spring cache support. Cache keys are derived from the qualified name,
 * and all mutating operations evict the corresponding read-cache entries.
 */
@Slf4j
@CacheConfig(cacheNames = "metacat")
public class ConnectorTableServiceProxy {
    private final ConnectorManager connectorManager;
    private final ConverterUtil converterUtil;

    /**
     * Constructor.
     *
     * @param connectorManager connector manager
     * @param converterUtil    utility to convert to/from Dto to connector resources
     */
    public ConnectorTableServiceProxy(
        final ConnectorManager connectorManager,
        final ConverterUtil converterUtil
    ) {
        this.connectorManager = connectorManager;
        this.converterUtil = converterUtil;
    }

    /**
     * Calls the connector table service create method.
     *
     * @param name table name
     * @param tableInfo table object
     */
    public void create(final QualifiedName name, final TableInfo tableInfo) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorTableService service = connectorManager.getTableService(name);
        final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
        service.create(connectorRequestContext, tableInfo);
    }

    /**
     * Calls the connector table service delete method and evicts all cached reads of the table.
     *
     * @param name table name
     */
    @Caching(evict = {
        @CacheEvict(key = "'table.' + #name", beforeInvocation = true),
        @CacheEvict(key = "'table.includeInfoDetails.' + #name", beforeInvocation = true),
        @CacheEvict(key = "'table.metadataLocationOnly.' + #name", beforeInvocation = true)
    })
    public void delete(final QualifiedName name) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorTableService service = connectorManager.getTableService(name);
        log.info("Drop table {}", name);
        final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
        service.delete(connectorRequestContext, name);
    }

    /**
     * Returns an info object that's populated only with the metadata location. Fetches from the
     * cache if useCache is set to true.
     *
     * @param name the table name
     * @param getTableServiceParameters the table service parameters
     * @param useCache true, if the location can be retrieved from the cache
     * @return The table info object with the metadata location.
     */
    @Cacheable(key = "'table.metadataLocationOnly.' + #name", condition = "#useCache")
    public TableInfo getWithMetadataLocationOnly(final QualifiedName name,
                                                 final GetTableServiceParameters getTableServiceParameters,
                                                 final boolean useCache) {
        return getInternal(name, getTableServiceParameters);
    }

    /**
     * Returns an info object that's populated with info details. Fetches from the cache if
     * useCache is set to true.
     *
     * @param name the table name
     * @param getTableServiceParameters the table service parameters
     * @param useCache true, if the info can be retrieved from the cache
     * @return The table info object
     */
    @Cacheable(key = "'table.includeInfoDetails.' + #name", condition = "#useCache")
    public TableInfo getWithInfoDetails(final QualifiedName name,
                                        final GetTableServiceParameters getTableServiceParameters,
                                        final boolean useCache) {
        return getInternal(name, getTableServiceParameters);
    }

    /**
     * Returns table if <code>useCache</code> is true and the object exists in the cache. If
     * <code>useCache</code> is false or the object does not exist in the cache, it is retrieved
     * from the store.
     *
     * @param name table name
     * @param getTableServiceParameters get table parameters
     * @param useCache true, if table can be retrieved from cache
     * @return table info
     */
    @Cacheable(key = "'table.' + #name", condition = "#useCache")
    public TableInfo get(final QualifiedName name,
                         final GetTableServiceParameters getTableServiceParameters,
                         final boolean useCache) {
        return getInternal(name, getTableServiceParameters);
    }

    /**
     * Internal get implementation shared by the cached read methods.
     *
     * @param name The table name.
     * @param getTableServiceParameters get table parameters.
     * @return The tableinfo instance.
     */
    private TableInfo getInternal(final QualifiedName name,
                                  final GetTableServiceParameters getTableServiceParameters) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
        // Propagate the read-scope flags so the connector can skip work it doesn't need.
        connectorRequestContext.setIncludeMetadataLocationOnly(
            getTableServiceParameters.isIncludeMetadataLocationOnly());
        connectorRequestContext.setIncludeMetadata(getTableServiceParameters.isIncludeMetadataFromConnector());
        final ConnectorTableService service = connectorManager.getTableService(name);
        return service.get(connectorRequestContext, name);
    }

    /**
     * Calls the connector table service rename method and evicts cached reads of the old name.
     *
     * @param oldName old table name
     * @param newName new table name
     * @param isMView true, if the object is a view
     */
    @Caching(evict = {
        @CacheEvict(key = "'table.' + #oldName", beforeInvocation = true),
        @CacheEvict(key = "'table.includeInfoDetails.' + #oldName", beforeInvocation = true),
        @CacheEvict(key = "'table.metadataLocationOnly.' + #oldName", beforeInvocation = true)
    })
    public void rename(
        final QualifiedName oldName,
        final QualifiedName newName,
        final boolean isMView
    ) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorTableService service = connectorManager.getTableService(oldName);
        try {
            log.info("Renaming {} {} to {}", isMView ? "view" : "table", oldName, newName);
            final ConnectorRequestContext connectorRequestContext
                = converterUtil.toConnectorContext(metacatRequestContext);
            service.rename(connectorRequestContext, oldName, newName);
        } catch (UnsupportedOperationException ignored) {
            // Connector does not support rename; intentionally ignored (same policy as update()).
        }
    }

    /**
     * Calls the connector table service update method and evicts cached reads of the table.
     *
     * @param name table name
     * @param tableInfo table object
     * @return true if errors after this should be ignored.
     */
    @Caching(evict = {
        @CacheEvict(key = "'table.' + #name", beforeInvocation = true),
        @CacheEvict(key = "'table.includeInfoDetails.' + #name", beforeInvocation = true),
        @CacheEvict(key = "'table.metadataLocationOnly.' + #name", beforeInvocation = true)
    })
    public boolean update(final QualifiedName name, final TableInfo tableInfo) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorTableService service = connectorManager.getTableService(name);
        boolean result = false;
        try {
            log.info("Updating table {}", name);
            final ConnectorRequestContext connectorRequestContext
                = converterUtil.toConnectorContext(metacatRequestContext);
            service.update(connectorRequestContext, tableInfo);
            result = connectorRequestContext.isIgnoreErrorsAfterUpdate();
        } catch (UnsupportedOperationException ignored) {
            //Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata.
            log.debug("Catalog {} does not support the table update operation.", name.getCatalogName());
        }
        return result;
    }

    /**
     * Calls the connector table service getTableNames method across all registered connectors.
     *
     * @param uri location
     * @param prefixSearch if false, the method looks for an exact match for the uri
     * @return list of table names
     */
    public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
        final List<QualifiedName> result = Lists.newArrayList();
        connectorManager.getTableServices().forEach(service -> {
            final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
            final ConnectorRequestContext connectorRequestContext
                = converterUtil.toConnectorContext(metacatRequestContext);
            try {
                final Map<String, List<QualifiedName>> names =
                    service.getTableNames(connectorRequestContext, Lists.newArrayList(uri), prefixSearch);
                final List<QualifiedName> qualifiedNames = names.values().stream().flatMap(Collection::stream)
                    .collect(Collectors.toList());
                result.addAll(qualifiedNames);
            } catch (final UnsupportedOperationException uoe) {
                log.debug("Table service doesn't support getting table names by URI. Skipping");
            }
        });
        return result;
    }

    /**
     * Calls the connector table service getTableNames method across all registered connectors.
     *
     * @param uris list of locations
     * @param prefixSearch if false, the method looks for an exact match for the uri
     * @return map of uri to table names
     */
    public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
        final Map<String, List<QualifiedName>> result = Maps.newHashMap();
        connectorManager.getTableServices().forEach(service -> {
            final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
            final ConnectorRequestContext connectorRequestContext
                = converterUtil.toConnectorContext(metacatRequestContext);
            try {
                final Map<String, List<QualifiedName>> names =
                    service.getTableNames(connectorRequestContext, uris, prefixSearch);
                // Accumulate into lists this method owns. The previous implementation stored the
                // connector-provided list directly and later called addAll() on it; if a connector
                // returned an immutable list, a second connector matching the same uri triggered
                // an UnsupportedOperationException.
                names.forEach((uri, qNames) ->
                    result.computeIfAbsent(uri, key -> Lists.newArrayList()).addAll(qNames));
            } catch (final UnsupportedOperationException uoe) {
                log.debug("Table service doesn't support getting table names by URI. Skipping");
            }
        });
        return result;
    }

    /**
     * Calls the connector table service exists method.
     *
     * @param name table name
     * @return true, if the object exists.
     */
    public boolean exists(final QualifiedName name) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorTableService service = connectorManager.getTableService(name);
        final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
        return service.exists(connectorRequestContext, name);
    }

    /**
     * Returns a filtered list of table names.
     *
     * @param name catalog name
     * @param parameters service parameters
     * @return list of table names
     */
    public List<QualifiedName> getQualifiedNames(final QualifiedName name,
                                                 final GetTableNamesServiceParameters parameters) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorTableService service = connectorManager.getTableService(name);
        final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
        return service.getTableNames(connectorRequestContext, name, parameters.getFilter(), parameters.getLimit());
    }
}
| 2,137 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DefaultOwnerValidationService.java
|
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.ImmutableSet;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.services.OwnerValidationService;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import javax.annotation.Nullable;
import javax.servlet.http.HttpServletRequest;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* A default implementation of Ownership validation service that check for users against
* known invalid userIds.
*/
@Slf4j
@RequiredArgsConstructor
public class DefaultOwnerValidationService implements OwnerValidationService {
private static final Set<String> KNOWN_INVALID_OWNERS = ImmutableSet.of(
"root", "metacat", "metacat-thrift-interface");
private static final Set<String> KNOWN_INVALID_OWNER_GROUPS = ImmutableSet.of(
"root", "metacat", "metacat-thrift-interface");
private final Registry registry;
/**
 * Collects owner candidates in priority order: the explicit table owner from the dto,
 * then the calling user from the request context, then the serde owner. Null entries
 * are dropped.
 *
 * <p>NOTE(review): assumes {@code dto.getSerde()} is non-null — confirm against callers.
 *
 * @param dto the input Table Dto
 * @return ordered, non-null owner candidates
 */
@Override
public List<String> extractPotentialOwners(@NonNull final TableDto dto) {
    final String dtoOwner = dto.getTableOwner().orElse(null);
    final String requestUser = MetacatContextManager.getContext().getUserName();
    final String serdeOwner = dto.getSerde().getOwner();
    return Stream.of(dtoOwner, requestUser, serdeOwner)
        .filter(Objects::nonNull)
        .collect(Collectors.toList());
}
/**
 * Returns the single owner-group candidate from the dto. Note that, unlike
 * {@code extractPotentialOwners}, the returned one-element list may contain
 * {@code null} when the dto has no owner group.
 *
 * @param dto the input Table Dto
 * @return a one-element list holding the dto's owner group (possibly null)
 */
@Override
public List<String> extractPotentialOwnerGroups(@NonNull final TableDto dto) {
    final String ownerGroup = dto.getTableOwnerGroup().orElse(null);
    return Collections.singletonList(ownerGroup);
}
// A user is valid unless it is blank or appears in the known-invalid owner set.
@Override
public boolean isUserValid(@Nullable final String user) {
    return !isKnownInvalidUser(user);
}

// A group is valid unless it is blank or appears in the known-invalid group set.
@Override
public boolean isGroupValid(@Nullable final String groupName) {
    return !isKnownInvalidGroup(groupName);
}
/**
 * Default "enforcement": determines whether the dto carries a valid owner or owner
 * group and records diagnostic details. This implementation only logs — it never
 * throws — so invalid owners are surfaced for debugging rather than rejected.
 *
 * @param operationName the name of the metacat API, useful for logging
 * @param tableName     the name of the table
 * @param tableDto      the table dto containing the owner in the definition metadata field
 */
@Override
public void enforceOwnerValidation(@NonNull final String operationName,
                                   @NonNull final QualifiedName tableName,
                                   @NonNull final TableDto tableDto) {
    final MetacatRequestContext requestContext = MetacatContextManager.getContext();
    final Map<String, String> headers = getHttpHeaders();
    final String owner = tableDto.getTableOwner().orElse(null);
    final String ownerGroup = tableDto.getTableOwnerGroup().orElse(null);
    // Ownership is acceptable when either the individual owner or the group checks out.
    final boolean ownerConsideredValid = isUserValid(owner) || isGroupValid(ownerGroup);
    logOwnershipDiagnosticDetails(
        operationName, tableName, tableDto,
        requestContext, ownerConsideredValid, headers);
}
/**
* Checks if the user is from a known list of invalid users. Subclasses can use
* this method before attempting to check against remote services to save on latency.
*
* @param userId the user id
* @return true if the user id is a known invalid user, else false
*/
protected boolean isKnownInvalidUser(@Nullable final String userId) {
return StringUtils.isBlank(userId) || knownInvalidOwners().contains(userId);
}
/**
* Checks if the group is from a known list of invalid groups. Subclasses can use
* this method before attempting to check against remote services to save on latency.
*
* @param groupName the group name
* @return true if the group is a known invalid group, else false
*/
protected boolean isKnownInvalidGroup(@Nullable final String groupName) {
return StringUtils.isBlank(groupName) || knownInvalidOwnerGroups().contains(groupName);
}
/**
* Returns set of known invalid users. Subclasses can override to provide
* a list fetched from a dynamic source.
*
* @return set of known invalid users
*/
protected Set<String> knownInvalidOwners() {
return KNOWN_INVALID_OWNERS;
}
/**
* Returns set of known invalid owner groups. Subclasses can override to provide
* a list fetched from a dynamic source.
*
* @return set of known invalid groups
*/
protected Set<String> knownInvalidOwnerGroups() {
return KNOWN_INVALID_OWNER_GROUPS;
}
/**
* Logs diagnostic data for debugging invalid owners. Subclasses can use this to log
* diagnostic data when owners are found to be invalid.
*/
protected void logOwnershipDiagnosticDetails(final String operationName,
final QualifiedName name,
final TableDto tableDto,
final MetacatRequestContext context,
final boolean tableOwnerValid,
final Map<String, String> requestHeaders) {
try {
if (!tableOwnerValid) {
registry.counter(
"metacat.table.owner.invalid",
"operation", operationName,
"scheme", String.valueOf(context.getScheme()),
"catalogAndDb", name.getCatalogName() + "_" + name.getDatabaseName()
).increment();
log.info("Operation: {}, invalid owner: {}, group: {}. name: {}, dto: {}, context: {}, headers: {}",
operationName,
tableDto.getTableOwner().orElse("<null>"),
tableDto.getTableOwnerGroup().orElse("<null>"),
name, tableDto, context, requestHeaders);
}
} catch (final Exception ex) {
log.warn("Error when logging diagnostic data for invalid owner for operation: {}, name: {}, table: {}",
operationName, name, tableDto, ex);
}
}
/**
* Returns all the Http headers for the current request. Subclasses can use it to
* log diagnostic data.
*
* @return the Http headers
*/
protected Map<String, String> getHttpHeaders() {
final Map<String, String> requestHeaders = new HashMap<>();
final RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
if (requestAttributes instanceof ServletRequestAttributes) {
final ServletRequestAttributes servletRequestAttributes = (ServletRequestAttributes) requestAttributes;
final HttpServletRequest servletRequest = servletRequestAttributes.getRequest();
if (servletRequest != null) {
final Enumeration<String> headerNames = servletRequest.getHeaderNames();
if (headerNames != null) {
while (headerNames.hasMoreElements()) {
final String header = headerNames.nextElement();
requestHeaders.put(header, servletRequest.getHeader(header));
}
}
}
}
return requestHeaders;
}
}
| 2,138 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/CatalogServiceImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.CreateCatalogDto;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.CatalogInfo;
import com.netflix.metacat.common.server.connectors.model.ClusterInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePreEvent;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.main.services.GetCatalogServiceParameters;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Catalog service implementation.
 */
public class CatalogServiceImpl implements CatalogService {
    private final ConnectorManager connectorManager;
    private final UserMetadataService userMetadataService;
    private final MetacatEventBus eventBus;
    private final ConverterUtil converterUtil;
    /**
     * Constructor.
     *
     * @param connectorManager    connector manager
     * @param userMetadataService user metadata service
     * @param eventBus            Internal event bus
     * @param converterUtil       utility to convert to/from Dto to connector resources
     */
    public CatalogServiceImpl(
        final ConnectorManager connectorManager,
        final UserMetadataService userMetadataService,
        final MetacatEventBus eventBus,
        final ConverterUtil converterUtil
    ) {
        this.connectorManager = connectorManager;
        this.userMetadataService = userMetadataService;
        this.eventBus = eventBus;
        this.converterUtil = converterUtil;
    }
    /**
     * {@inheritDoc}
     */
    @Nonnull
    @Override
    public CatalogDto get(final QualifiedName name, final GetCatalogServiceParameters getCatalogServiceParameters) {
        final CatalogDto catalogDto = new CatalogDto();
        catalogDto.setName(name);
        // Prepare the connector context from the current request context.
        final ConnectorRequestContext requestContext =
            converterUtil.toConnectorContext(MetacatContextManager.getContext());
        requestContext.setIncludeMetadata(getCatalogServiceParameters.isIncludeMetadataFromConnector());
        final List<String> databaseNames = Lists.newArrayList();
        final Set<MetacatCatalogConfig> catalogConfigs = connectorManager.getCatalogConfigs(name.getCatalogName());
        for (final MetacatCatalogConfig catalogConfig : catalogConfigs) {
            QualifiedName lookupName = name;
            if (catalogConfig.getSchemaWhitelist().isEmpty()) {
                catalogDto.setType(catalogConfig.getType());
            } else {
                // Route the database-service lookup through the first whitelisted schema.
                lookupName = QualifiedName.ofDatabase(name.getCatalogName(),
                    catalogConfig.getSchemaWhitelist().get(0));
            }
            if (getCatalogServiceParameters.isIncludeDatabaseNames()) {
                databaseNames.addAll(listVisibleDatabases(catalogConfig, lookupName, name, requestContext));
            }
            if (catalogConfig.isProxy()) {
                // Proxy catalogs report cluster/type/metadata via the connector itself.
                final CatalogInfo catalogInfo = connectorManager.getCatalogService(name).get(requestContext, name);
                final ClusterInfo clusterInfo = catalogInfo.getClusterInfo();
                catalogDto.setCluster(converterUtil.toClusterDto(clusterInfo));
                catalogDto.setType(clusterInfo.getType());
                catalogDto.setMetadata(catalogInfo.getMetadata());
            } else {
                catalogDto.setCluster(converterUtil.toClusterDto(catalogConfig.getClusterInfo()));
            }
        }
        catalogDto.setDatabases(databaseNames);
        if (getCatalogServiceParameters.isIncludeUserMetadata()) {
            userMetadataService.populateMetadata(catalogDto, false);
        }
        return catalogDto;
    }
    /**
     * Lists the database names visible under the given config: applies the schema
     * black/white lists and sorts case-insensitively.
     */
    private List<String> listVisibleDatabases(final MetacatCatalogConfig catalogConfig,
                                              final QualifiedName lookupName,
                                              final QualifiedName name,
                                              final ConnectorRequestContext requestContext) {
        return connectorManager.getDatabaseService(lookupName)
            .listNames(requestContext, name, null, null, null)
            .stream()
            .map(QualifiedName::getDatabaseName)
            .filter(db -> catalogConfig.getSchemaBlacklist().isEmpty()
                || !catalogConfig.getSchemaBlacklist().contains(db))
            .filter(db -> catalogConfig.getSchemaWhitelist().isEmpty()
                || catalogConfig.getSchemaWhitelist().contains(db))
            .sorted(String.CASE_INSENSITIVE_ORDER)
            .collect(Collectors.toList());
    }
    /**
     * {@inheritDoc}
     */
    @Nonnull
    @Override
    public CatalogDto get(final QualifiedName name) {
        final GetCatalogServiceParameters parameters = GetCatalogServiceParameters.builder()
            .includeDatabaseNames(true)
            .includeUserMetadata(true)
            .build();
        return get(name, parameters);
    }
    /**
     * {@inheritDoc}
     */
    @Nonnull
    @Override
    public List<CatalogMappingDto> getCatalogNames() {
        final Set<CatalogInfo> catalogs = connectorManager.getCatalogs();
        if (catalogs.isEmpty()) {
            throw new MetacatNotFoundException("Unable to locate any catalogs");
        }
        // De-duplicate while preserving the first occurrence of each mapping.
        final List<CatalogMappingDto> mappings = Lists.newArrayList();
        for (final CatalogInfo catalog : catalogs) {
            final CatalogMappingDto mapping = new CatalogMappingDto(
                catalog.getName().getCatalogName(),
                catalog.getClusterInfo().getType(),
                converterUtil.toClusterDto(catalog.getClusterInfo()));
            if (!mappings.contains(mapping)) {
                mappings.add(mapping);
            }
        }
        return mappings;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void update(final QualifiedName name, final CreateCatalogDto createCatalogDto) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        eventBus.post(new MetacatUpdateDatabasePreEvent(name, metacatRequestContext, this));
        // Result intentionally unused; presumably resolves/validates the catalog — confirm.
        connectorManager.getCatalogConfigs(name.getCatalogName());
        userMetadataService.saveMetadata(metacatRequestContext.getUserName(), createCatalogDto, true);
        eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this));
    }
}
| 2,139 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/PartitionServiceImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.HasMetadata;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.TableMigrationInProgressException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPreEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPreEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
 * Partition service.
 *
 * <p>Coordinates partition listing, saving and deletion across the connector layer,
 * the user-metadata store and the internal event bus, recording distribution-summary
 * metrics for each operation.
 */
@Slf4j
public class PartitionServiceImpl implements PartitionService {
    // Collaborators, all supplied through the constructor.
    private final CatalogService catalogService;
    private final ConnectorManager connectorManager;
    private final TableService tableService;
    private final UserMetadataService userMetadataService;
    private final ThreadServiceManager threadServiceManager;
    private final Config config;
    private final MetacatEventBus eventBus;
    private final ConverterUtil converterUtil;
    private final Registry registry;
    // Metric ids created once at construction; tagged with the table's name parts at record time.
    private final Id partitionAddDistSummary;
    private final Id partitionMetadataOnlyAddDistSummary;
    private final Id partitionGetDistSummary;
    private final Id partitionDeleteDistSummary;
    /**
     * Constructor.
     *
     * @param catalogService       catalog service
     * @param connectorManager     connector manager
     * @param tableService         table service
     * @param userMetadataService  user metadata service
     * @param threadServiceManager thread manager
     * @param config               configurations
     * @param eventBus             Internal event bus
     * @param converterUtil        utility to convert to/from Dto to connector resources
     * @param registry             registry handle
     */
    public PartitionServiceImpl(
        final CatalogService catalogService,
        final ConnectorManager connectorManager,
        final TableService tableService,
        final UserMetadataService userMetadataService,
        final ThreadServiceManager threadServiceManager,
        final Config config,
        final MetacatEventBus eventBus,
        final ConverterUtil converterUtil,
        final Registry registry
    ) {
        this.catalogService = catalogService;
        this.connectorManager = connectorManager;
        this.tableService = tableService;
        this.userMetadataService = userMetadataService;
        this.threadServiceManager = threadServiceManager;
        this.config = config;
        this.eventBus = eventBus;
        this.converterUtil = converterUtil;
        this.registry = registry;
        // Pre-create the metric ids so per-call recording only has to add tags.
        this.partitionAddDistSummary =
            registry.createId(Metrics.DistributionSummaryAddPartitions.getMetricName());
        this.partitionMetadataOnlyAddDistSummary =
            registry.createId(Metrics.DistributionSummaryMetadataOnlyAddPartitions.getMetricName());
        this.partitionGetDistSummary =
            registry.createId(Metrics.DistributionSummaryGetPartitions.getMetricName());
        this.partitionDeleteDistSummary =
            registry.createId(Metrics.DistributionSummaryDeletePartitions.getMetricName());
    }
    /**
     * {@inheritDoc}
     *
     * <p>Guards configured tables against unbounded (unfiltered, unpaged) listings, fetches
     * partitions from the connector, then optionally joins in user definition/data metadata
     * fetched concurrently. Data metrics returned by the connector take precedence over the
     * data-metadata store lookup.
     */
    @Override
    public List<PartitionDto> list(
        final QualifiedName name,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable,
        final boolean includeUserDefinitionMetadata,
        final boolean includeUserDataMetadata,
        @Nullable final GetPartitionsRequestDto getPartitionsRequestDto
    ) {
        //add table info here
        // the conversion will handle getPartitionsRequestDto as null case
        final PartitionListRequest partitionListRequest =
            converterUtil.toPartitionListRequest(getPartitionsRequestDto, pageable, sort);
        final String filterExpression = partitionListRequest.getFilter();
        final List<String> partitionNames = partitionListRequest.getPartitionNames();
        // Refuse a full listing for tables on the configured deny-list when no filter,
        // paging, or explicit partition names constrain the result.
        if (Strings.isNullOrEmpty(filterExpression)
            && (pageable == null || !pageable.isPageable())
            && (partitionNames == null || partitionNames.isEmpty())
            && config.getNamesToThrowErrorOnListPartitionsWithNoFilter().contains(name)) {
            throw new IllegalArgumentException(String.format("No filter or limit specified for table %s", name));
        }
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorPartitionService service = connectorManager.getPartitionService(name);
        final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
        final List<PartitionInfo> resultInfo = service
            .getPartitions(connectorRequestContext, name, partitionListRequest, this.getTableInfo(name));
        List<PartitionDto> result = Lists.newArrayList();
        if (resultInfo != null && !resultInfo.isEmpty()) {
            result = resultInfo.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
            final List<QualifiedName> names = Lists.newArrayList();
            final List<String> uris = Lists.newArrayList();
            // Partition name -> data metrics already supplied by the connector; these are
            // preferred over what the data-metadata store returns below.
            final Map<String, ObjectNode> prePopulatedMap = new HashMap<>();
            resultInfo.stream().filter(partitionInfo -> partitionInfo.getDataMetrics() != null)
                .forEach(partitionInfo ->
                    prePopulatedMap.put(partitionInfo.getName().toString(), partitionInfo.getDataMetrics()));
            result.forEach(partitionDto -> {
                names.add(partitionDto.getName());
                if (partitionDto.isDataExternal()) {
                    uris.add(partitionDto.getDataUri());
                }
            });
            registry.distributionSummary(
                this.partitionGetDistSummary.withTags(name.parts())).record(result.size());
            log.info("Got {} partitions for {} using filter: {} and partition names: {}",
                result.size(), name, filterExpression,
                partitionNames);
            if (includeUserDefinitionMetadata || includeUserDataMetadata) {
                // Fetch definition and data metadata concurrently; each future resolves to an
                // empty map when its include flag is off.
                final List<ListenableFuture<Map<String, ObjectNode>>> futures = Lists.newArrayList();
                futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDefinitionMetadata
                    ? userMetadataService.getDefinitionMetadataMap(names)
                    : Maps.newHashMap()));
                futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDataMetadata
                    ? userMetadataService.getDataMetadataMap(uris)
                    : Maps.newHashMap()));
                try {
                    // successfulAsList preserves submission order: index 0 = definition, 1 = data.
                    final List<Map<String, ObjectNode>> metadataResults = Futures.successfulAsList(futures)
                        .get(1, TimeUnit.HOURS);
                    final Map<String, ObjectNode> definitionMetadataMap = metadataResults.get(0);
                    final Map<String, ObjectNode> dataMetadataMap = metadataResults.get(1);
                    result.forEach(partitionDto -> userMetadataService.populateMetadata(partitionDto,
                        definitionMetadataMap.get(partitionDto.getName().toString()),
                        prePopulatedMap.containsKey(partitionDto.getName().toString())
                            ? prePopulatedMap.get(partitionDto.getName().toString()) //using the prepopulated datametric
                            : dataMetadataMap.get(partitionDto.getDataUri())));
                } catch (Exception e) {
                    Throwables.propagate(e);
                }
            }
        }
        return result;
    }
/**
* {@inheritDoc}
*/
@Override
public Integer count(final QualifiedName name) {
Integer result = 0;
if (tableService.exists(name)) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
result = service.getPartitionCount(connectorRequestContext, name, this.getTableInfo(name));
}
return result;
}
    /**
     * {@inheritDoc}
     *
     * <p>No-ops when the request carries no partitions. Rejects updates while an iceberg
     * migration "do not modify" tag is present on the table. Metadata-only saves skip
     * the connector entirely.
     */
    @Override
    public PartitionsSaveResponseDto save(final QualifiedName name, final PartitionsSaveRequestDto dto) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorPartitionService service = connectorManager.getPartitionService(name);
        final List<PartitionDto> partitionDtos = dto.getPartitions();
        // If no partitions are passed, then return
        if (partitionDtos == null || partitionDtos.isEmpty()) {
            return new PartitionsSaveResponseDto();
        }
        if (!tableService.exists(name)) {
            throw new TableNotFoundException(name);
        }
        // Fetch tableDto only if no update on tags configs exist.
        if (MetacatUtils.configHasDoNotModifyForIcebergMigrationTag(config.getNoTableUpdateOnTags())) {
            final TableDto tableDto = getTableDto(name);
            if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(tableDto, config.getNoTableUpdateOnTags())) {
                throw new TableMigrationInProgressException(
                    MetacatUtils.getIcebergMigrationExceptionMsg("PartitionUpdate", name.getTableName()));
            }
        }
        //optimization for metadata only updates (e.g. squirrel) , assuming only validate partitions are requested
        if (dto.getSaveMetadataOnly()) {
            return savePartitionMetadataOnly(metacatRequestContext, dto, name, partitionDtos);
        } else {
            return updatePartitions(service, metacatRequestContext, dto, name, partitionDtos);
        }
    }
    /**
     * Optimization for metadata only updates.
     *
     * <p>Skips the connector save entirely: validates the add count against the configured
     * threshold, records the metadata-only metric, publishes pre/post events and persists
     * user metadata for the supplied partitions.
     *
     * @param metacatRequestContext request context
     * @param dto                   savePartition dto
     * @param name                  qualified name
     * @param partitionDtos         partition dtos
     * @return empty save partition response dto
     */
    private PartitionsSaveResponseDto savePartitionMetadataOnly(
        final MetacatRequestContext metacatRequestContext,
        final PartitionsSaveRequestDto dto,
        final QualifiedName name, final List<PartitionDto> partitionDtos) {
        validateAdds(name, partitionDtos.size());
        registry.distributionSummary(
            this.partitionMetadataOnlyAddDistSummary.withTags(name.parts())).record(partitionDtos.size());
        eventBus.post(
            new MetacatSaveTablePartitionMetadataOnlyPreEvent(name, metacatRequestContext, this, dto));
        // Save metadata
        log.info("Saving metadata only for partitions for {}", name);
        userMetadataService.saveMetadata(metacatRequestContext.getUserName(), partitionDtos, true);
        eventBus.post(
            new MetacatSaveTablePartitionMetadataOnlyPostEvent(
                name, metacatRequestContext, this, partitionDtos, new PartitionsSaveResponseDto()));
        //empty saveResponseDto is returned for optimization purpose
        //since client (squirrel) only checks the response code
        return converterUtil.toPartitionsSaveResponseDto(new PartitionsSaveResponse());
    }
/**
* Add, delete, update partitions.
*
* @param service partition service
* @param metacatRequestContext metacat request context
* @param dto partition save request dto
* @param name qualified name
* @param partitionDtos partitions dto
* @return partition save response dto
*/
private PartitionsSaveResponseDto updatePartitions(
final ConnectorPartitionService service,
final MetacatRequestContext metacatRequestContext,
final PartitionsSaveRequestDto dto,
final QualifiedName name, final List<PartitionDto> partitionDtos) {
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
List<HasMetadata> deletePartitions = Lists.newArrayList();
List<PartitionDto> deletePartitionDtos = Lists.newArrayList();
validate(name, dto);
registry.distributionSummary(
this.partitionAddDistSummary.withTags(name.parts())).record(partitionDtos.size());
final List<String> partitionIdsForDeletes = dto.getPartitionIdsForDeletes();
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
eventBus.post(new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, dto));
registry.distributionSummary(
this.partitionDeleteDistSummary.withTags(name.parts())).record(partitionIdsForDeletes.size());
final GetPartitionsRequestDto requestDto =
new GetPartitionsRequestDto(null, partitionIdsForDeletes, false, true);
final List<PartitionInfo> deletePartitionInfos = service.getPartitions(connectorRequestContext, name,
converterUtil.toPartitionListRequest(requestDto, null, null), this.getTableInfo(name));
if (deletePartitionInfos != null) {
deletePartitionDtos = deletePartitionInfos.stream()
.map(converterUtil::toPartitionDto).collect(Collectors.toList());
deletePartitions = new ArrayList<>(deletePartitions);
}
}
// Save all the new and updated partitions
eventBus.post(new MetacatSaveTablePartitionPreEvent(name, metacatRequestContext, this, dto));
log.info("Saving partitions for {} ({})", name, partitionDtos.size());
final PartitionsSaveResponseDto result = converterUtil.toPartitionsSaveResponseDto(
service.savePartitions(connectorRequestContext, name, converterUtil.toPartitionsSaveRequest(dto)));
// Save metadata
log.info("Saving user metadata for partitions for {}", name);
// delete metadata
if (!deletePartitions.isEmpty()) {
log.info("Deleting user metadata for partitions with names {} for {}", partitionIdsForDeletes, name);
deleteMetadatas(metacatRequestContext.getUserName(), deletePartitions);
}
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), partitionDtos, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSavePartitionMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
//publish the delete and save in order
//TODO: create MetacatUpdateTablePartitionEvents, only publish one partitionUpdateEvent here.
if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
eventBus.post(
new MetacatDeleteTablePartitionPostEvent(name,
metacatRequestContext, this, deletePartitionDtos));
}
eventBus.post(
new MetacatSaveTablePartitionPostEvent(name, metacatRequestContext, this, partitionDtos, result));
return result;
}
private void validate(final QualifiedName name, final PartitionsSaveRequestDto dto) {
validateDeletes(name, dto.getPartitionIdsForDeletes() != null ? dto.getPartitionIdsForDeletes().size() : 0);
validateAdds(name, dto.getPartitions() != null ? dto.getPartitions().size() : 0);
}
private void validateDeletes(final QualifiedName name, final int noOfDeletes) {
if (noOfDeletes > config.getMaxDeletedPartitionsThreshold()) {
final String message =
String.format("Number of partitions to be deleted for table %s exceeded the threshold %d",
name, config.getMaxDeletedPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
}
}
private void validateAdds(final QualifiedName name, final int noOfAdds) {
if (noOfAdds > config.getMaxAddedPartitionsThreshold()) {
final String message =
String.format("Number of partitions to be added/updated for table %s exceeded the threshold %d",
name, config.getMaxAddedPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
}
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name, final List<String> partitionIds) {
validateDeletes(name, partitionIds != null ? partitionIds.size() : 0);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
registry.distributionSummary(
this.partitionDeleteDistSummary.withTags(name.parts())).record(partitionIds.size());
if (!tableService.exists(name)) {
throw new TableNotFoundException(name);
}
// Fetch tableDto only if no update on tags configs exist.
if (MetacatUtils.configHasDoNotModifyForIcebergMigrationTag(config.getNoTableDeleteOnTags())) {
final TableDto tableDto = getTableDto(name);
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(tableDto, config.getNoTableDeleteOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("PartitionDelete", name.getTableName()));
}
}
if (!partitionIds.isEmpty()) {
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setPartitionIdsForDeletes(partitionIds);
eventBus.post(new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, dto));
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
// Get the partitions before calling delete
final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto(null, partitionIds, false, true);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
final List<PartitionInfo> partitionInfos = service.getPartitions(connectorRequestContext, name,
converterUtil.toPartitionListRequest(requestDto, null, null), this.getTableInfo(name));
List<HasMetadata> partitions = Lists.newArrayList();
List<PartitionDto> partitionDtos = Lists.newArrayList();
if (partitionInfos != null) {
partitionDtos = partitionInfos.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
partitions = new ArrayList<>(partitions);
}
log.info("Deleting partitions with names {} for {}", partitionIds, name);
service.deletePartitions(connectorRequestContext, name, partitionIds, this.getTableInfo(name));
// delete metadata
log.info("Deleting user metadata for partitions with names {} for {}", partitionIds, name);
if (!partitions.isEmpty()) {
deleteMetadatas(metacatRequestContext.getUserName(), partitions);
}
eventBus.post(
new MetacatDeleteTablePartitionPostEvent(name, metacatRequestContext, this, partitionDtos)
);
}
}
private void deleteMetadatas(final String userId, final List<HasMetadata> partitions) {
// Spawning off since this is a time consuming task
threadServiceManager.getExecutor().submit(() -> userMetadataService.deleteMetadata(userId, partitions));
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
return getQualifiedNames(Lists.newArrayList(uri), prefixSearch).values().stream().flatMap(Collection::stream)
.collect(Collectors.toList());
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
final Map<String, List<QualifiedName>> result = Maps.newConcurrentMap();
final List<ListenableFuture<Void>> futures = Lists.newArrayList();
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
connectorManager.getPartitionServices().forEach(service -> {
futures.add(threadServiceManager.getExecutor().submit(() -> {
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
final Map<String, List<QualifiedName>> partitionNames = service
.getPartitionNames(connectorRequestContext, uris, prefixSearch);
partitionNames.forEach((uri, subPartitionNames) -> {
final List<QualifiedName> existingPartitionNames = result.get(uri);
if (existingPartitionNames == null) {
result.put(uri, subPartitionNames);
} else {
existingPartitionNames.addAll(subPartitionNames);
}
});
} catch (final UnsupportedOperationException uoe) {
log.debug("Partition service doesn't support getPartitionNames. Ignoring.");
}
return null;
}));
});
try {
Futures.allAsList(futures).get(1, TimeUnit.HOURS);
} catch (Exception e) {
Throwables.propagate(e);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartitionKeys(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
List<String> result = Lists.newArrayList();
if (tableService.exists(name)) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
result = service.getPartitionKeys(
connectorRequestContext,
name,
converterUtil.toPartitionListRequest(getPartitionsRequestDto, pageable, sort),
this.getTableInfo(name)
);
} catch (final UnsupportedOperationException uoe) {
log.debug("Catalog {} doesn't support getPartitionKeys. Ignoring.", name.getCatalogName());
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartitionUris(
final QualifiedName name,
@Nullable final Sort sort,
@Nullable final Pageable pageable,
@Nullable final GetPartitionsRequestDto getPartitionsRequestDto
) {
List<String> result = Lists.newArrayList();
if (tableService.exists(name)) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final ConnectorPartitionService service = connectorManager.getPartitionService(name);
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
try {
result = service.getPartitionUris(connectorRequestContext, name,
converterUtil.toPartitionListRequest(
getPartitionsRequestDto, pageable, sort), this.getTableInfo(name));
} catch (final UnsupportedOperationException uoe) {
log.info("Catalog {} doesn't support getPartitionUris. Ignoring.", name.getCatalogName());
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto create(final QualifiedName name, final PartitionDto partitionDto) {
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setCheckIfExists(false);
dto.setPartitions(Lists.newArrayList(partitionDto));
save(name, dto);
return partitionDto;
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final PartitionDto partitionDto) {
final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
dto.setPartitions(Lists.newArrayList(partitionDto));
save(name, dto);
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto updateAndReturn(final QualifiedName name, final PartitionDto dto) {
update(name, dto);
return dto;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
final QualifiedName tableName = QualifiedName
.ofTable(name.getCatalogName(), name.getDatabaseName(), name.getTableName());
delete(tableName, Lists.newArrayList(name.getPartitionName()));
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto get(final QualifiedName name) {
PartitionDto result = null;
final QualifiedName tableName = QualifiedName
.ofTable(name.getCatalogName(), name.getDatabaseName(), name.getTableName());
final List<PartitionDto> dtos =
list(tableName, null, null, true, true,
new GetPartitionsRequestDto(null, Lists.newArrayList(name.getPartitionName()), true, true));
if (!dtos.isEmpty()) {
result = dtos.get(0);
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(final QualifiedName name) {
return get(name) != null;
}
    /**
     * Returns the table dto (including table info, served from cache when possible)
     * for the given table name.
     *
     * @throws TableNotFoundException if the table does not exist
     */
    private TableDto getTableDto(final QualifiedName name) {
        return this.tableService.get(name,
            GetTableServiceParameters.builder().includeInfo(true)
                .useCache(true).build()).orElseThrow(() -> new TableNotFoundException(name));
    }
    // Converts the (possibly cached) table dto into the connector-level table info.
    private TableInfo getTableInfo(final QualifiedName name) {
        return converterUtil.fromTableDto(getTableDto(name));
    }
}
| 2,140 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/MViewServiceImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.MoreObjects;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.NameDateDto;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateMViewPreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteMViewPreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateMViewPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateMViewPreEvent;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.MViewService;
import com.netflix.metacat.main.services.MetacatServiceHelper;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Metacat view service. A materialized view (mView) is represented as a plain
 * table stored in the special "franklinviews" database of the same catalog as
 * the source table. This class maps every view operation onto table/partition
 * operations against that backing table, whose name follows the scheme
 * [dbName]_[tableName]_[viewName] (see {@link #createViewName}).
 */
@Slf4j
public class MViewServiceImpl implements MViewService {
    /**
     * Hive database name where views are stored.
     */
    private static final String VIEW_DB_NAME = "franklinviews";
    // Resolves per-catalog connector services (used for listing view tables).
    private final ConnectorManager connectorManager;
    // Table operations on the backing view tables.
    private final TableService tableService;
    // Partition operations on the backing view tables.
    private final PartitionService partitionService;
    // Storage for definition/data metadata.
    private final UserMetadataService userMetadataService;
    // Internal event bus for pre/post notifications.
    private final MetacatEventBus eventBus;
    // Converts between dtos and connector resources/contexts.
    private final ConverterUtil converterUtil;
    /**
     * Constructor.
     *
     * @param connectorManager connector manager
     * @param tableService table service
     * @param partitionService partition service
     * @param userMetadataService user metadata interceptor service
     * @param eventBus Internal event bus
     * @param converterUtil utility to convert to/from Dto to connector resources
     */
    public MViewServiceImpl(
        final ConnectorManager connectorManager,
        final TableService tableService,
        final PartitionService partitionService,
        final UserMetadataService userMetadataService,
        final MetacatEventBus eventBus,
        final ConverterUtil converterUtil
    ) {
        this.connectorManager = connectorManager;
        this.tableService = tableService;
        this.partitionService = partitionService;
        this.userMetadataService = userMetadataService;
        this.eventBus = eventBus;
        this.converterUtil = converterUtil;
    }
    /**
     * Creates the materialized view using the schema of the given table.
     * Assumes that the "franklinviews" database name already exists in the given catalog.
     */
    @Override
    public TableDto create(final QualifiedName name) {
        return createAndSnapshotPartitions(name, false, null);
    }
    /**
     * Creates the materialized view using the schema of the given table.
     * Assumes that the "franklinviews" database name already exists in the given catalog.
     * When {@code snapshot} is true, the source table's current partitions (optionally
     * narrowed by {@code filter}) are copied onto the newly created view.
     */
    @Override
    public TableDto createAndSnapshotPartitions(final QualifiedName name,
                                                final boolean snapshot,
                                                @Nullable final String filter) {
        final TableDto result;
        // Get the table
        log.info("Get the table {}", name);
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        eventBus.post(new MetacatCreateMViewPreEvent(name, metacatRequestContext, this, snapshot, filter));
        final Optional<TableDto> oTable = tableService.get(name,
            GetTableServiceParameters.builder()
                .includeDataMetadata(false)
                .includeDefinitionMetadata(false)
                .disableOnReadMetadataIntercetor(true) //turn off for optimization
                .includeInfo(true)
                .build());
        if (oTable.isPresent()) {
            final TableDto table = oTable.get();
            // Backing table lives at [catalog]/franklinviews/[db]_[table]_[view].
            final String viewName = createViewName(name);
            final QualifiedName targetName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, viewName);
            // Get the view table if it exists
            log.info("Check if the view table {} exists.", targetName);
            Optional<TableDto> oViewTable = Optional.empty();
            try {
                //read the original view back
                oViewTable = tableService.get(targetName,
                    GetTableServiceParameters.builder()
                        .includeDataMetadata(false)
                        .includeDefinitionMetadata(false)
                        .disableOnReadMetadataIntercetor(false)
                        .includeInfo(true)
                        .build());
            } catch (NotFoundException ignored) {
                // View table does not exist yet; it will be created below.
            }
            if (!oViewTable.isPresent()) {
                log.info("Creating view {}.", targetName);
                //
                // Fix issue where an iceberg table is used to create the mView and fails with invalid field type.
                // The issue is caused by a mismatch with field source types.
                // The check for iceberg table is needed to not disrupt the previous logic for other table types.
                //
                if (MetacatServiceHelper.isIcebergTable(table)) {
                    table.getFields().forEach(f -> f.setSource_type(null));
                }
                result = tableService.copy(table, targetName);
            } else {
                // View already exists; reuse it as-is.
                result = oViewTable.get();
            }
            if (snapshot) {
                snapshotPartitions(name, filter);
            }
            eventBus.post(
                new MetacatCreateMViewPostEvent(name, metacatRequestContext, this, result, snapshot, filter)
            );
        } else {
            throw new TableNotFoundException(name);
        }
        return result;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TableDto create(final QualifiedName name, final TableDto dto) {
        // Ignore the dto passed
        return create(name);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TableDto deleteAndReturn(final QualifiedName name) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        eventBus.post(new MetacatDeleteMViewPreEvent(name, metacatRequestContext, this));
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        log.info("Deleting view {}.", viewQName);
        final TableDto deletedDto = tableService.deleteAndReturn(viewQName, true);
        eventBus.post(new MetacatDeleteMViewPostEvent(name, metacatRequestContext, this, deletedDto));
        return deletedDto;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void delete(final QualifiedName name) {
        // NOTE(review): unlike deleteAndReturn, no pre/post events are published
        // on this path — confirm that is intentional.
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        tableService.deleteAndReturn(viewQName, true);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void update(final QualifiedName name, final TableDto tableDto) {
        updateAndReturn(name, tableDto);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TableDto updateAndReturn(final QualifiedName name, final TableDto tableDto) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        eventBus.post(new MetacatUpdateMViewPreEvent(name, metacatRequestContext, this, tableDto));
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        log.info("Updating view {}.", viewQName);
        tableService.update(viewQName, tableDto);
        // Re-read the view so the post event and the caller see the updated state.
        final TableDto updatedDto = getOpt(name, GetTableServiceParameters.builder()
            .includeInfo(true)
            .includeDefinitionMetadata(false)
            .includeDataMetadata(false)
            .disableOnReadMetadataIntercetor(false)
            .build()).orElseThrow(() -> new IllegalStateException("should exist"));
        eventBus.post(new MetacatUpdateMViewPostEvent(name, metacatRequestContext, this, updatedDto));
        return updatedDto;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TableDto get(final QualifiedName name) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        return tableService.get(viewQName);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Optional<TableDto> getOpt(final QualifiedName name, final GetTableServiceParameters tableParameters) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        final Optional<TableDto> result = tableService.get(viewQName, tableParameters);
        //
        // User definition metadata of the underlying table is returned
        //
        if (result.isPresent()) {
            final TableDto table = result.get();
            // Present the dto under the view's name, not the backing table's.
            table.setName(name);
            final QualifiedName tableName = QualifiedName
                .ofTable(name.getCatalogName(), name.getDatabaseName(), name.getTableName());
            final Optional<ObjectNode> definitionMetadata =
                userMetadataService.getDefinitionMetadataWithInterceptor(tableName,
                    GetMetadataInterceptorParameters.builder().hasMetadata(table).build());
            definitionMetadata.ifPresent(
                jsonNodes -> userMetadataService.populateMetadata(table, jsonNodes, null));
        }
        return result;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void snapshotPartitions(final QualifiedName name, @Nullable final String filter) {
        // Copy the source table's current partitions (matching the filter) onto the view.
        final List<PartitionDto> partitionDtos =
            partitionService.list(name, null, null, false, false,
                new GetPartitionsRequestDto(filter, null, true, true));
        if (partitionDtos != null && !partitionDtos.isEmpty()) {
            log.info("Snapshot partitions({}) for view {}.", partitionDtos.size(), name);
            final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
            dto.setPartitions(partitionDtos);
            savePartitions(name, dto, false);
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public PartitionsSaveResponseDto savePartitions(
        final QualifiedName name,
        final PartitionsSaveRequestDto dto,
        final boolean merge
    ) {
        final PartitionsSaveResponseDto result;
        final List<PartitionDto> partitionDtos = dto.getPartitions();
        if (partitionDtos == null || partitionDtos.isEmpty()) {
            return new PartitionsSaveResponseDto();
        }
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        // Re-point each partition at the backing view table before saving.
        partitionDtos.forEach(partitionDto ->
            partitionDto.setName(QualifiedName
                .ofPartition(viewQName.getCatalogName(), viewQName.getDatabaseName(), viewQName.getTableName(),
                    partitionDto.getName().getPartitionName())));
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        eventBus.post(new MetacatSaveMViewPartitionPreEvent(name, metacatRequestContext, this, dto));
        final List<String> partitionIdsForDeletes = dto.getPartitionIdsForDeletes();
        if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
            eventBus.post(new MetacatDeleteMViewPartitionPreEvent(name, metacatRequestContext, this, dto));
        }
        if (merge) {
            // Merge mode: look up the partitions that already exist on the view and
            // carry forward their storage info (uri/formats/serialization lib) where
            // the incoming partition leaves them unset or unchanged.
            final List<String> partitionNames = partitionDtos.stream().map(
                partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList());
            final List<PartitionDto> existingPartitions =
                partitionService.list(viewQName, null, null, false, false,
                    new GetPartitionsRequestDto(null, partitionNames, false, true));
            final Map<String, PartitionDto> existingPartitionsMap = existingPartitions.stream()
                .collect(Collectors
                    .toMap(partitionDto -> partitionDto.getName().getPartitionName(), Function.identity()));
            final List<PartitionDto> mergedPartitions = partitionDtos.stream()
                .map(partitionDto -> {
                    final String partitionName = partitionDto.getName().getPartitionName();
                    final PartitionDto existingPartition = existingPartitionsMap.get(partitionName);
                    return mergePartition(partitionDto, existingPartition);
                }).collect(Collectors.toList());
            dto.setPartitions(mergedPartitions);
            result = partitionService.save(viewQName, dto);
        } else {
            result = partitionService.save(viewQName, dto);
        }
        eventBus.post(
            new MetacatSaveMViewPartitionPostEvent(name, metacatRequestContext, this, dto.getPartitions())
        );
        if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
            eventBus.post(
                new MetacatDeleteMViewPartitionPostEvent(name, metacatRequestContext, this, partitionIdsForDeletes)
            );
        }
        return result;
    }
    /**
     * Merges the existing partition's storage (serde) info into the incoming
     * partition. The existing uri, input/output formats and serialization lib are
     * carried over only when the incoming partition's uri is unset or unchanged.
     */
    private PartitionDto mergePartition(final PartitionDto partitionDto,
                                        @Nullable final PartitionDto existingPartition) {
        if (existingPartition != null) {
            final StorageDto existingSerde = existingPartition.getSerde();
            if (existingSerde != null) {
                StorageDto serde = partitionDto.getSerde();
                if (serde == null) {
                    serde = new StorageDto();
                    partitionDto.setSerde(serde);
                }
                if (serde.getUri() == null || serde.getUri().equals(existingSerde.getUri())) {
                    serde.setUri(existingSerde.getUri());
                    // Fill only the fields the incoming partition left unset.
                    if (serde.getInputFormat() == null) {
                        serde.setInputFormat(existingSerde.getInputFormat());
                    }
                    if (serde.getOutputFormat() == null) {
                        serde.setOutputFormat(existingSerde.getOutputFormat());
                    }
                    if (serde.getSerializationLib() == null) {
                        serde.setSerializationLib(existingSerde.getSerializationLib());
                    }
                }
            }
        }
        return partitionDto;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void deletePartitions(final QualifiedName name, final List<String> partitionIds) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
        dto.setPartitionIdsForDeletes(partitionIds);
        eventBus.post(new MetacatDeleteMViewPartitionPreEvent(name, metacatRequestContext, this, dto));
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        partitionService.delete(viewQName, partitionIds);
        eventBus.post(
            new MetacatDeleteMViewPartitionPostEvent(name, metacatRequestContext, this, partitionIds)
        );
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public List<PartitionDto> listPartitions(
        final QualifiedName name,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable,
        final boolean includeUserMetadata,
        @Nullable final GetPartitionsRequestDto getPartitionsRequestDto
    ) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        // The same flag is used for both definition and data metadata inclusion.
        return partitionService.list(viewQName, sort, pageable, includeUserMetadata, includeUserMetadata,
            getPartitionsRequestDto);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public List<String> getPartitionKeys(
        final QualifiedName name,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable,
        @Nullable final GetPartitionsRequestDto getPartitionsRequestDto
    ) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        return partitionService.getPartitionKeys(viewQName, sort, pageable, getPartitionsRequestDto);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public List<String> getPartitionUris(final QualifiedName name,
                                         @Nullable final Sort sort,
                                         @Nullable final Pageable pageable,
                                         @Nullable final GetPartitionsRequestDto getPartitionsRequestDto) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        return partitionService.getPartitionUris(viewQName, sort, pageable, getPartitionsRequestDto);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Integer partitionCount(final QualifiedName name) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        return partitionService.count(viewQName);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public List<NameDateDto> list(final QualifiedName name) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final QualifiedName viewDbName = QualifiedName.ofDatabase(name.getCatalogName(), VIEW_DB_NAME);
        final ConnectorTableService service = connectorManager.getTableService(viewDbName);
        List<QualifiedName> tableNames = Lists.newArrayList();
        try {
            final ConnectorRequestContext connectorRequestContext
                = converterUtil.toConnectorContext(metacatRequestContext);
            tableNames = service.listNames(connectorRequestContext, viewDbName, null, null, null);
        } catch (Exception ignored) {
            // ignore. Return an empty list if database 'franklinviews' does not exist
        }
        if (!name.isDatabaseDefinition() && name.isCatalogDefinition()) {
            // Catalog-level listing: return every view table name as-is.
            return tableNames.stream()
                .map(viewName -> {
                    final NameDateDto dto = new NameDateDto();
                    dto.setName(viewName);
                    return dto;
                })
                .collect(Collectors.toList());
        } else {
            // View table names follow [dbName]_[tableName]_[viewName]; filter by the
            // prefix for the requested database/table, then strip the prefix to
            // recover the bare view name.
            final String prefix = String.format("%s_%s_", name.getDatabaseName(),
                MoreObjects.firstNonNull(name.getTableName(), ""));
            return tableNames.stream()
                .filter(qualifiedTableName -> qualifiedTableName.getTableName().startsWith(prefix))
                .map(qualifiedTableName -> {
                    final NameDateDto dto = new NameDateDto();
                    dto.setName(QualifiedName
                        .ofView(qualifiedTableName.getCatalogName(), name.getDatabaseName(), name.getTableName(),
                            qualifiedTableName.getTableName().substring(prefix.length())));
                    return dto;
                })
                .collect(Collectors.toList());
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void saveMetadata(final QualifiedName name,
                             final ObjectNode definitionMetadata,
                             final ObjectNode dataMetadata) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        tableService.saveMetadata(viewQName, definitionMetadata, dataMetadata);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void rename(final QualifiedName name, final QualifiedName newViewName) {
        final QualifiedName oldViewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        final QualifiedName newViewQName = QualifiedName
            .ofTable(newViewName.getCatalogName(), VIEW_DB_NAME, createViewName(newViewName));
        tableService.rename(oldViewQName, newViewQName, true);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public boolean exists(final QualifiedName name) {
        final QualifiedName viewQName =
            QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name));
        return tableService.exists(viewQName);
    }
    /**
     * The view is going to be represented by a table in a special db in Franklin. As such there must be
     * a conversion from view id -> view table id like so:
     * [dbName]_[tableName]_[viewName]
     */
    private String createViewName(final QualifiedName name) {
        return String.format("%s_%s_%s", name.getDatabaseName(), name.getTableName(), name.getViewName());
    }
}
| 2,141 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DatabaseServiceImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateDatabasePreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePreEvent;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.common.server.usermetadata.AuthorizationService;
import com.netflix.metacat.common.server.usermetadata.MetacatOperation;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetDatabaseServiceParameters;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Database service implementation.
*/
@Slf4j
public class DatabaseServiceImpl implements DatabaseService {
private final ConnectorManager connectorManager;
private final UserMetadataService userMetadataService;
private final MetacatEventBus eventBus;
private final ConverterUtil converterUtil;
private final AuthorizationService authorizationService;
/**
* Constructor.
*
* @param connectorManager connector manager
* @param userMetadataService user metadata service
* @param eventBus internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
* @param authorizationService authorization service
*/
public DatabaseServiceImpl(
final ConnectorManager connectorManager,
final UserMetadataService userMetadataService,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil,
final AuthorizationService authorizationService
) {
this.connectorManager = connectorManager;
this.userMetadataService = userMetadataService;
this.eventBus = eventBus;
this.converterUtil = converterUtil;
this.authorizationService = authorizationService;
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto create(final QualifiedName name, final DatabaseDto dto) {
validate(name);
log.info("Creating schema {}", name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatCreateDatabasePreEvent(name, metacatRequestContext, this));
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorManager.getDatabaseService(name).create(connectorRequestContext,
converterUtil.fromDatabaseDto(dto));
if (dto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for schema {}", name);
userMetadataService.saveDefinitionMetadata(name, metacatRequestContext.getUserName(),
Optional.of(dto.getDefinitionMetadata()), true);
}
final DatabaseDto createdDto = get(name,
GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(true)
.includeUserMetadata(dto.getDefinitionMetadata() != null)
.includeTableNames(true)
.build());
eventBus.post(new MetacatCreateDatabasePostEvent(name, metacatRequestContext, this, createdDto));
return createdDto;
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final DatabaseDto dto) {
validate(name);
log.info("Updating schema {}", name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
eventBus.post(new MetacatUpdateDatabasePreEvent(name, metacatRequestContext, this));
try {
final ConnectorRequestContext connectorRequestContext
= converterUtil.toConnectorContext(metacatRequestContext);
connectorManager.getDatabaseService(name)
.update(connectorRequestContext, converterUtil.fromDatabaseDto(dto));
} catch (UnsupportedOperationException ignored) {
}
if (dto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for schema {}", name);
userMetadataService.saveDefinitionMetadata(name, metacatRequestContext.getUserName(),
Optional.of(dto.getDefinitionMetadata()), true);
}
eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatRequestContext, this));
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto updateAndReturn(final QualifiedName name, final DatabaseDto dto) {
update(name, dto);
return get(name);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
name, MetacatOperation.DELETE);
log.info("Dropping schema {}", name);
final DatabaseDto dto = get(name, GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeUserMetadata(true)
.includeTableNames(true)
.build());
eventBus.post(new MetacatDeleteDatabasePreEvent(name, metacatRequestContext, this, dto));
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorManager.getDatabaseService(name).delete(connectorRequestContext, name);
// Delete definition metadata if it exists
if (userMetadataService.getDefinitionMetadata(name).isPresent()) {
log.info("Deleting user metadata for schema {}", name);
userMetadataService.deleteDefinitionMetadata(ImmutableList.of(name));
}
eventBus.post(new MetacatDeleteDatabasePostEvent(name, metacatRequestContext, this, dto));
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseDto get(final QualifiedName name) {
return get(name,
GetDatabaseServiceParameters.builder()
.includeUserMetadata(true)
.includeTableNames(true)
.disableOnReadMetadataIntercetor(false)
.build());
}
/**
 * {@inheritDoc}
 */
@Override
public DatabaseDto get(final QualifiedName name, final GetDatabaseServiceParameters getDatabaseServiceParameters) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final MetacatCatalogConfig config = connectorManager.getCatalogConfig(name);
final ConnectorDatabaseService service = connectorManager.getDatabaseService(name);
final ConnectorTableService tableService = connectorManager.getTableService(name);
// Prepare the connector request
final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
connectorRequestContext.setIncludeMetadata(getDatabaseServiceParameters.isIncludeMetadataFromConnector());
final DatabaseDto dto = converterUtil.toDatabaseDto(service.get(connectorRequestContext, name));
dto.setType(config.getType());
if (getDatabaseServiceParameters.isIncludeTableNames()) {
// Unbounded listing: no prefix filter, sort or page size is applied here.
final List<QualifiedName> tableNames = tableService
.listNames(connectorRequestContext, name, null, null, null);
List<QualifiedName> viewNames = Collections.emptyList();
if (config.isIncludeViewsWithTables()) {
// TODO JdbcMetadata returns ImmutableList.of() for views. We should change it to fetch views.
try {
viewNames = service.listViewNames(connectorRequestContext, name);
// Connector does not support views; intentionally treated as "no views".
} catch (UnsupportedOperationException ignored) {
}
}
// Check to see if schema exists
if (tableNames.isEmpty() && viewNames.isEmpty() && !exists(name)) {
throw new DatabaseNotFoundException(name);
}
// Merge tables and views into a single case-insensitively sorted name list.
dto.setTables(
Stream.concat(tableNames.stream(), viewNames.stream())
.map(QualifiedName::getTableName)
.sorted(String.CASE_INSENSITIVE_ORDER)
.collect(Collectors.toList())
);
}
if (getDatabaseServiceParameters.isIncludeUserMetadata()) {
log.info("Populate user metadata for schema {}", name);
userMetadataService.populateMetadata(dto,
getDatabaseServiceParameters.isDisableOnReadMetadataIntercetor());
}
return dto;
}
/**
 * {@inheritDoc}
 */
@SuppressFBWarnings
@Override
public boolean exists(final QualifiedName name) {
    // Cheapest possible probe: skip table names, user metadata and the
    // on-read metadata interceptor.
    try {
        final DatabaseDto dto = get(name, GetDatabaseServiceParameters.builder()
            .disableOnReadMetadataIntercetor(true)
            .includeTableNames(false)
            .includeUserMetadata(false)
            .build());
        return dto != null;
    } catch (NotFoundException ignored) {
        // The database does not exist.
        return false;
    }
}
// Validates that the given name is non-null and refers to a database.
// NOTE(review): checkState throws IllegalStateException; for argument validation,
// Preconditions.checkArgument (IllegalArgumentException) would be conventional and
// would match TableServiceImpl.validate — confirm callers do not depend on the
// current exception type before changing it.
private void validate(final QualifiedName name) {
Preconditions.checkNotNull(name, "name cannot be null");
Preconditions.checkState(name.isDatabaseDefinition(), "name %s is not for a database", name);
}
}
| 2,142 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/TableServiceImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableMigrationInProgressException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateIcebergTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePreEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.common.server.usermetadata.AuthorizationService;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.common.server.usermetadata.MetacatOperation;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetTableNamesServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.OwnerValidationService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
 * Table service implementation. Coordinates connector calls (via
 * {@code ConnectorTableServiceProxy}), user/definition metadata, tags,
 * authorization checks, owner validation and event publishing for table CRUD.
 */
@Slf4j
@RequiredArgsConstructor
public class TableServiceImpl implements TableService {
// Resolves catalog-specific connector services and configuration.
private final ConnectorManager connectorManager;
// All direct connector table operations go through this proxy.
private final ConnectorTableServiceProxy connectorTableServiceProxy;
private final DatabaseService databaseService;
private final TagService tagService;
// Stores definition/data metadata outside the connector.
private final UserMetadataService userMetadataService;
private final MetacatJson metacatJson;
// Pre/post events for create/update/rename/delete are published here.
private final MetacatEventBus eventBus;
// Spectator registry used for timers and counters.
private final Registry registry;
private final Config config;
private final ConverterUtil converterUtil;
private final AuthorizationService authorizationService;
private final OwnerValidationService ownerValidationService;
/**
 * {@inheritDoc}
 */
@Override
public TableDto create(final QualifiedName name, final TableDto tableDto) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
validate(name);
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
tableDto.getName(), MetacatOperation.CREATE);
// Fill in serde/metadata/owner defaults before validating the owner.
setDefaultAttributes(tableDto);
ownerValidationService.enforceOwnerValidation("createTable", name, tableDto);
log.info("Creating table {}", name);
eventBus.post(new MetacatCreateTablePreEvent(name, metacatRequestContext, this, tableDto));
connectorTableServiceProxy.create(name, converterUtil.fromTableDto(tableDto));
// Persist any caller-supplied metadata and apply tags derived from it.
if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for table {}", name);
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSaveTableMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
tag(name, tableDto.getDefinitionMetadata());
}
// Best-effort re-read so the returned DTO reflects what was actually stored;
// failures here (and in the post event) are logged but do not fail the create.
TableDto dto = tableDto;
try {
dto = get(name, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception e) {
handleExceptionOnCreate(name, "getTable", e);
}
try {
eventBus.post(new MetacatCreateTablePostEvent(name, metacatRequestContext, this, dto));
} catch (Exception e) {
handleExceptionOnCreate(name, "postEvent", e);
}
return dto;
}
/**
 * Fills in attributes that callers may omit on create: serde, definition
 * metadata, table owner and owner group.
 */
private void setDefaultAttributes(final TableDto tableDto) {
    setDefaultSerdeIfNull(tableDto);
    setDefaultDefinitionMetadataIfNull(tableDto);
    setOwnerIfNull(tableDto);
    setOwnerGroupIfAvailable(tableDto);
}

/** Ensures the DTO carries a definition-metadata node (empty if absent). */
private void setDefaultDefinitionMetadataIfNull(final TableDto tableDto) {
    if (tableDto.getDefinitionMetadata() == null) {
        tableDto.setDefinitionMetadata(metacatJson.emptyObjectNode());
    }
}

/** Ensures the DTO carries a serde block (empty if absent). */
private void setDefaultSerdeIfNull(final TableDto tableDto) {
    if (tableDto.getSerde() == null) {
        tableDto.setSerde(new StorageDto());
    }
}
/**
 * Sets the owner of the table. The order of priority of selecting the owner is:
 * <pre>
 * 1. Explicitly set in the table dto
 * 2. Username from the request headers
 * 3. Owner set in the serde
 * </pre>
 *
 * @param tableDto the table DTO
 */
private void setOwnerIfNull(final TableDto tableDto) {
    final List<String> candidates = ownerValidationService.extractPotentialOwners(tableDto);
    final Optional<String> validOwner = candidates.stream()
        .filter(this::isOwnerValid)
        .findFirst();
    if (validOwner.isPresent()) {
        updateTableOwner(tableDto, validOwner.get());
        return;
    }
    // No candidate passed validation; keep the first non-null candidate so that
    // at least some owner information is recorded.
    for (final String candidate : candidates) {
        if (candidate != null) {
            updateTableOwner(tableDto, candidate);
            break;
        }
    }
}

/** Sets the owning group on the DTO if any candidate group passes validation. */
private void setOwnerGroupIfAvailable(final TableDto tableDto) {
    for (final String group : ownerValidationService.extractPotentialOwnerGroups(tableDto)) {
        if (isOwnerGroupValid(group)) {
            updateTableOwnerGroup(tableDto, group);
            return;
        }
    }
}

/** Writes the given user id into definitionMetadata.owner.userId. */
void updateTableOwner(final TableDto tableDto, final String userId) {
    tableDto.getDefinitionMetadata().with("owner").put("userId", userId);
}

/** Writes the given group into definitionMetadata.owner.google_group. */
void updateTableOwnerGroup(final TableDto tableDto, final String groupName) {
    tableDto.getDefinitionMetadata().with("owner").put("google_group", groupName);
}

/** Delegates user-id validation to the owner validation service. */
private boolean isOwnerValid(@Nullable final String userId) {
    return ownerValidationService.isUserValid(userId);
}

/** Delegates group validation to the owner validation service. */
private boolean isOwnerGroupValid(@Nullable final String groupName) {
    return ownerValidationService.isGroupValid(groupName);
}
/**
 * Applies any tags found in the definition metadata to the table.
 *
 * @param name the table name
 * @param definitionMetadata definition metadata that may carry a tag list
 */
private void tag(final QualifiedName name, final ObjectNode definitionMetadata) {
    final Set<String> tags = MetacatUtils.getTableTags(definitionMetadata);
    if (!tags.isEmpty()) {
        log.info("Setting tags {} for table {}", tags, name);
        // The merged tag set returned by setTags is not needed here. Previously it
        // was captured in an unused local, which required @SuppressFBWarnings.
        tagService.setTags(name, tags, false);
    }
}
/**
 * {@inheritDoc}
 */
@Override
public TableDto deleteAndReturn(final QualifiedName name, final boolean isMView) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
validate(name);
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
name, MetacatOperation.DELETE);
eventBus.post(new MetacatDeleteTablePreEvent(name, metacatRequestContext, this));
// Start with a name-only DTO; enrich it from the store if the read succeeds.
TableDto tableDto = new TableDto();
tableDto.setName(name);
try {
final Optional<TableDto> oTable = get(name,
GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build());
tableDto = oTable.orElse(tableDto);
} catch (Exception e) {
// ignoreErrorsAfterUpdate=true: log/count but keep going so the delete still runs.
handleException(name, true, "deleteAndReturn_get", e);
}
// Fail if the table is tagged not to be deleted.
if (hasTags(tableDto, config.getNoTableDeleteOnTags())) {
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(tableDto, config.getNoTableDeleteOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("Delete", name.toString()));
} else {
throw new IllegalArgumentException(
String.format("Table %s cannot be deleted because it is tagged with %s.", name,
config.getNoTableDeleteOnTags()));
}
}
// Try to delete the table even if get above fails
try {
connectorTableServiceProxy.delete(name);
// If this is a common view, the storage_table if present
// should also be deleted.
if (MetacatUtils.isCommonView(tableDto.getMetadata())
&& config.deleteCommonViewStorageTable()) {
final Optional<String> storageTableName = MetacatUtils
.getCommonViewStorageTable(tableDto.getMetadata());
if (storageTableName.isPresent()) {
final QualifiedName qualifiedStorageTableName = QualifiedName.ofTable(name.getCatalogName(),
name.getDatabaseName(), storageTableName.get());
deleteCommonViewStorageTable(name, qualifiedStorageTableName);
}
}
} catch (NotFoundException ignored) {
// Deleting an already-absent table is treated as success.
log.debug("NotFoundException ignored for table {}", name);
}
if (canDeleteMetadata(name)) {
// Delete the metadata. Type doesn't matter since we discard the result
log.info("Deleting user metadata for table {}", name);
userMetadataService.deleteMetadata(metacatRequestContext.getUserName(), Lists.newArrayList(tableDto));
log.info("Deleting tags for table {}", name);
tagService.delete(name, false);
} else {
// Definition metadata is retained; optionally soft-delete data metadata for external data.
if (config.canSoftDeleteDataMetadata() && tableDto.isDataExternal()) {
userMetadataService.softDeleteDataMetadata(metacatRequestContext.getUserName(),
Lists.newArrayList(tableDto.getDataUri()));
}
}
eventBus.post(new MetacatDeleteTablePostEvent(name, metacatRequestContext, this, tableDto, isMView));
return tableDto;
}
/**
 * Returns true if the table carries at least one of the given tags.
 *
 * @param tableDto table DTO (may be null, in which case the answer is false)
 * @param hasTags tags to look for
 */
private boolean hasTags(@Nullable final TableDto tableDto, final Set<String> hasTags) {
    if (hasTags.isEmpty() || tableDto == null) {
        return false;
    }
    final Set<String> tableTags = MetacatUtils.getTableTags(tableDto.getDefinitionMetadata());
    return !tableTags.isEmpty() && hasTags.stream().anyMatch(tableTags::contains);
}
/**
 * Returns true
 * 1. If the system is configured to delete definition metadata.
 * 2. If the system is configured not to, but the tableName is enabled for it either explicitly or because the
 * table's database/catalog is configured to.
 *
 * @param tableName table name
 * @return whether or not to delete definition metadata
 */
private boolean canDeleteMetadata(final QualifiedName tableName) {
return config.canDeleteTableDefinitionMetadata() || isEnabledForTableDefinitionMetadataDelete(tableName);
}
/**
 * Returns true if tableName is enabled for definition metadata delete either explicitly or because the
 * table's database/catalog is configured to.
 *
 * @param tableName table name
 * @return whether or not to delete definition metadata
 */
private boolean isEnabledForTableDefinitionMetadataDelete(final QualifiedName tableName) {
final Set<QualifiedName> enableDeleteForQualifiedNames = config.getNamesEnabledForDefinitionMetadataDelete();
return enableDeleteForQualifiedNames.contains(tableName)
|| enableDeleteForQualifiedNames.contains(
QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()))
|| enableDeleteForQualifiedNames.contains(QualifiedName.ofCatalog(tableName.getCatalogName()));
}
/**
 * {@inheritDoc}
 */
@Override
public Optional<TableDto> get(final QualifiedName name, final GetTableServiceParameters getTableServiceParameters) {
validate(name);
TableDto tableInternal = null;
final TableDto table;
final MetacatCatalogConfig catalogConfig = connectorManager.getCatalogConfig(name);
// Hit the connector when table info is requested, or when the definition-metadata
// interceptor needs the table info as input.
if (getTableServiceParameters.isIncludeInfo()
|| (getTableServiceParameters.isIncludeDefinitionMetadata() && catalogConfig.isInterceptorEnabled()
&& !getTableServiceParameters.isDisableOnReadMetadataIntercetor())) {
try {
// Cache is used only when both the request and the global/catalog config allow it.
final boolean useCache = getTableServiceParameters.isUseCache() && config.isCacheEnabled()
&& catalogConfig.isCacheEnabled();
tableInternal = converterUtil.toTableDto(
getFromTableServiceProxy(name, getTableServiceParameters, useCache));
} catch (NotFoundException ignored) {
// Absent table maps to Optional.empty rather than an exception.
return Optional.empty();
}
table = tableInternal;
} else {
table = new TableDto();
table.setName(name);
}
if (getTableServiceParameters.isIncludeDefinitionMetadata()) {
final Optional<ObjectNode> definitionMetadata =
(getTableServiceParameters.isDisableOnReadMetadataIntercetor())
? userMetadataService.getDefinitionMetadata(name)
: userMetadataService.getDefinitionMetadataWithInterceptor(name,
GetMetadataInterceptorParameters.builder().hasMetadata(tableInternal).build());
definitionMetadata.ifPresent(table::setDefinitionMetadata);
}
if (getTableServiceParameters.isIncludeDataMetadata() && catalogConfig.isHasDataExternal()) {
TableDto dto = table;
// Data metadata is keyed by URI, so fetch the table info if we skipped it above.
if (tableInternal == null && !getTableServiceParameters.isIncludeInfo()) {
try {
final boolean useCache = getTableServiceParameters.isUseCache() && config.isCacheEnabled();
dto = converterUtil.toTableDto(
getFromTableServiceProxy(name, getTableServiceParameters, useCache));
// Best-effort: a missing table here just means no data metadata is attached.
} catch (NotFoundException ignored) {
}
}
if (dto != null && dto.getSerde() != null) {
final Optional<ObjectNode> dataMetadata =
userMetadataService.getDataMetadata(dto.getSerde().getUri());
dataMetadata.ifPresent(table::setDataMetadata);
}
}
return Optional.of(table);
}
/**
 * {@inheritDoc}
 */
@Override
public void rename(
    final QualifiedName oldName,
    final QualifiedName newName,
    final boolean isMView
) {
    validate(oldName);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
        oldName, MetacatOperation.RENAME);
    // orElseThrow guarantees oldTable is non-null, so the previous
    // "if (oldTable != null)" guard around the body below was dead code.
    final TableDto oldTable = get(oldName, GetTableServiceParameters.builder()
        .includeInfo(true)
        .disableOnReadMetadataIntercetor(false)
        .includeDefinitionMetadata(true)
        .includeDataMetadata(true)
        .build()).orElseThrow(() -> new TableNotFoundException(oldName));
    // Fail if the table is tagged not to be renamed.
    if (hasTags(oldTable, config.getNoTableRenameOnTags())) {
        if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(oldTable, config.getNoTableRenameOnTags())) {
            throw new TableMigrationInProgressException(
                MetacatUtils.getIcebergMigrationExceptionMsg("Rename", oldName.toString()));
        } else {
            throw new IllegalArgumentException(
                String.format("Table %s cannot be renamed because it is tagged with %s.", oldName,
                    config.getNoTableRenameOnTags()));
        }
    }
    // Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata
    eventBus.post(new MetacatRenameTablePreEvent(oldName, metacatRequestContext, this, newName));
    connectorTableServiceProxy.rename(oldName, newName, isMView);
    userMetadataService.renameDefinitionMetadataKey(oldName, newName);
    tagService.renameTableTags(oldName, newName.getTableName());
    final TableDto dto = get(newName, GetTableServiceParameters.builder()
        .includeInfo(true)
        .disableOnReadMetadataIntercetor(false)
        .includeDefinitionMetadata(true)
        .includeDataMetadata(true)
        .build()).orElseThrow(() -> new IllegalStateException("should exist"));
    eventBus.post(
        new MetacatRenameTablePostEvent(oldName, metacatRequestContext, this, oldTable, dto, isMView));
}
/**
 * {@inheritDoc}
 */
@Override
public void update(final QualifiedName name, final TableDto tableDto) {
// Delegates to updateAndReturn; the returned DTO is intentionally discarded.
updateAndReturn(name, tableDto);
}
/**
 * {@inheritDoc}
 */
@Override
public TableDto updateAndReturn(final QualifiedName name, final TableDto tableDto) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final TableDto oldTable = get(name, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElseThrow(() -> new TableNotFoundException(name));
eventBus.post(new MetacatUpdateTablePreEvent(name, metacatRequestContext, this, oldTable, tableDto));
// Updates are blocked while an iceberg migration is in progress.
if (MetacatUtils.hasDoNotModifyForIcebergMigrationTag(oldTable, config.getNoTableUpdateOnTags())) {
throw new TableMigrationInProgressException(
MetacatUtils.getIcebergMigrationExceptionMsg("Updates", name.toString()));
}
//
// Check if the table schema info is provided. If provided, we should continue calling the update on the table
// schema. Uri may exist in the serde when updating data metadata for a table.
//
boolean ignoreErrorsAfterUpdate = false;
if (isTableInfoProvided(tableDto, oldTable)) {
ignoreErrorsAfterUpdate = connectorTableServiceProxy.update(name, converterUtil.fromTableDto(tableDto));
}
// we do ownership validation and enforcement only if table owner is set in the dto
// because if it is null, we do not update the owner in the existing metadata record
if (tableDto.getTableOwner().isPresent()) {
// only if the owner is different from the previous, we run the enforcement
// for backwards compatibility
if (!tableDto.getTableOwner().get().equals(oldTable.getTableOwner().orElse(null))) {
ownerValidationService.enforceOwnerValidation("updateTable", name, tableDto);
}
}
try {
// Merge in metadata if the user sent any
if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for table {}", name);
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSaveTableMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
}
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "saveMetadata", e);
}
// ignoreErrorsAfterUpdate is currently set only for iceberg tables
if (config.isUpdateIcebergTableAsyncPostEventEnabled() && ignoreErrorsAfterUpdate) {
eventBus.post(new MetacatUpdateIcebergTablePostEvent(name,
metacatRequestContext, this, oldTable, tableDto));
return tableDto;
} else {
// Best-effort re-read so the returned DTO reflects the stored state.
TableDto updatedDto = tableDto;
try {
updatedDto = get(name,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "getTable", e);
}
try {
// The reference comparison (updatedDto != tableDto) flags whether the re-read succeeded.
eventBus.post(new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldTable,
updatedDto, updatedDto != tableDto));
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "postEvent", e);
}
return updatedDto;
}
}
/**
 * Throws exception if the provided <code>ignoreErrorsAfterUpdate</code> is false. If true, it will swallow the
 * exception, log it and increment a counter metric.
 *
 * @param name table name the request was for
 * @param ignoreErrorsAfterUpdate whether to swallow (and count) the failure
 * @param request short label of the failed operation, used in logs and metrics
 * @param ex the failure
 */
private void handleException(final QualifiedName name,
                             final boolean ignoreErrorsAfterUpdate,
                             final String request,
                             final Exception ex) {
    if (ignoreErrorsAfterUpdate) {
        log.warn("Failed {} for table {}. Error: {}", request, name, ex.getMessage());
        registry.counter(registry.createId(
            Metrics.CounterTableUpdateIgnoredException.getMetricName()).withTags(name.parts())
            .withTag("request", request)).increment();
    } else {
        // Throwables.propagate is deprecated; this is Guava's documented replacement:
        // rethrow unchecked exceptions as-is and wrap checked ones in RuntimeException.
        Throwables.throwIfUnchecked(ex);
        throw new RuntimeException(ex);
    }
}
/**
 * Swallows the given create-path failure: logs it and increments a counter metric.
 *
 * @param name table name the request was for
 * @param request short label of the failed operation, used in logs and metrics
 * @param ex the failure
 */
private void handleExceptionOnCreate(final QualifiedName name,
                                     final String request,
                                     final Exception ex) {
    log.warn("Failed {} for create table {}. Error: {}", request, name, ex.getMessage());
    registry.counter(
        registry.createId(Metrics.CounterTableCreateIgnoredException.getMetricName())
            .withTags(name.parts())
            .withTag("request", request))
        .increment();
}
/**
 * Returns true if the request DTO carries actual table-schema information —
 * fields, serde changes, table metadata or audit info — as opposed to a
 * metadata-only update.
 */
@VisibleForTesting
private boolean isTableInfoProvided(final TableDto tableDto, final TableDto oldTableDto) {
    return (tableDto.getFields() != null && !tableDto.getFields().isEmpty())
        || isSerdeInfoProvided(tableDto, oldTableDto)
        || (tableDto.getMetadata() != null && !tableDto.getMetadata().isEmpty())
        || tableDto.getAudit() != null;
}

/**
 * Returns true if the request DTO carries serde information beyond the existing
 * URI: a changed URI counts, an unchanged one does not.
 */
private boolean isSerdeInfoProvided(final TableDto tableDto, final TableDto oldTableDto) {
    final StorageDto serde = tableDto.getSerde();
    if (serde == null) {
        return false;
    }
    final StorageDto oldSerde = oldTableDto.getSerde();
    final String oldUri = oldSerde != null ? oldSerde.getUri() : null;
    return serde.getInputFormat() != null
        || serde.getOutputFormat() != null
        || serde.getOwner() != null
        || serde.getParameters() != null
        || serde.getSerdeInfoParameters() != null
        || serde.getSerializationLib() != null
        || (serde.getUri() != null && !Objects.equals(serde.getUri(), oldUri));
}
/**
 * {@inheritDoc}
 */
@Override
public void delete(final QualifiedName name) {
deleteAndReturn(name, false);
}
/**
 * {@inheritDoc}
 */
@Override
public TableDto get(final QualifiedName name) {
// Convenience overload that fetches everything (info, definition and data
// metadata) with the interceptor enabled; internal callers that need less
// should use the parameterized overload instead.
final Optional<TableDto> dto = get(name, GetTableServiceParameters.builder()
.includeInfo(true)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.disableOnReadMetadataIntercetor(false)
.build());
return dto.orElse(null);
}
/**
 * {@inheritDoc}
 */
@Override
public TableDto copy(final QualifiedName sourceName, final QualifiedName targetName) {
// Source should be same
if (!sourceName.getCatalogName().equals(targetName.getCatalogName())) {
throw new MetacatNotSupportedException("Cannot copy a table from a different source");
}
// Error out when source table does not exists
final Optional<TableDto> oTable = get(sourceName,
GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(true)
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.build());
if (!oTable.isPresent()) {
throw new TableNotFoundException(sourceName);
}
// Error out when target table already exists
final Optional<TableDto> oTargetTable = get(targetName,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(true)
.includeInfo(true)
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.build());
if (oTargetTable.isPresent()) {
// NOTE(review): TableNotFoundException is thrown here even though the target
// EXISTS — a "table already exists" exception would describe the condition
// better. Changing the type alters the caller-visible contract; confirm before fixing.
throw new TableNotFoundException(targetName);
}
return copy(oTable.get(), targetName);
}
/**
 * {@inheritDoc}
 */
@Override
public TableDto copy(final TableDto tableDto, final QualifiedName targetName) {
    // Create the target database on demand.
    final QualifiedName databaseName =
        QualifiedName.ofDatabase(targetName.getCatalogName(), targetName.getDatabaseName());
    if (!databaseService.exists(databaseName)) {
        final DatabaseDto databaseDto = new DatabaseDto();
        databaseDto.setName(databaseName);
        databaseService.create(databaseName, databaseDto);
    }
    // Copy only the schema-level attributes; user/data metadata is not carried over.
    final TableDto target = new TableDto();
    target.setName(targetName);
    target.setFields(tableDto.getFields());
    target.setPartition_keys(tableDto.getPartition_keys());
    final StorageDto sourceSerde = tableDto.getSerde();
    if (sourceSerde != null) {
        final StorageDto serdeCopy = new StorageDto();
        serdeCopy.setInputFormat(sourceSerde.getInputFormat());
        serdeCopy.setOwner(sourceSerde.getOwner());
        serdeCopy.setOutputFormat(sourceSerde.getOutputFormat());
        serdeCopy.setParameters(sourceSerde.getParameters());
        serdeCopy.setUri(sourceSerde.getUri());
        serdeCopy.setSerializationLib(sourceSerde.getSerializationLib());
        target.setSerde(serdeCopy);
    }
    create(targetName, target);
    return target;
}
/**
 * {@inheritDoc}
 */
@Override
public void saveMetadata(final QualifiedName name, final ObjectNode definitionMetadata,
final ObjectNode dataMetadata) {
validate(name);
// Only saves metadata when the table exists; a missing table is silently a no-op.
final Optional<TableDto> tableDtoOptional = get(name, GetTableServiceParameters.builder().includeInfo(true)
.disableOnReadMetadataIntercetor(true)
.includeDefinitionMetadata(false)
.includeDataMetadata(false)
.build());
if (tableDtoOptional.isPresent()) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final TableDto tableDto = tableDtoOptional.get();
tableDto.setDefinitionMetadata(definitionMetadata); //override the previous one
tableDto.setDataMetadata(dataMetadata);
log.info("Saving user metadata for table {}", name);
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
// Re-apply any tags carried in the new definition metadata.
tag(name, tableDto.getDefinitionMetadata());
}
}
/**
 * {@inheritDoc}
 */
@Override
public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
return connectorTableServiceProxy.getQualifiedNames(uri, prefixSearch);
}
/**
 * {@inheritDoc}
 */
@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
return connectorTableServiceProxy.getQualifiedNames(uris, prefixSearch);
}
/**
 * {@inheritDoc} Requires a non-empty filter expression.
 */
@Override
public List<QualifiedName> getQualifiedNames(final QualifiedName name,
final GetTableNamesServiceParameters parameters) {
if (Strings.isNullOrEmpty(parameters.getFilter())) {
throw new MetacatBadRequestException("Filter expression cannot be empty");
}
return connectorTableServiceProxy.getQualifiedNames(name, parameters);
}
/**
 * Routes a table fetch to the proxy method matching the requested level of detail:
 * metadata location only, connector-enriched info, or the plain table info.
 */
private TableInfo getFromTableServiceProxy(final QualifiedName name,
                                           final GetTableServiceParameters getTableServiceParameters,
                                           final boolean useCache) {
    if (getTableServiceParameters.isIncludeMetadataLocationOnly()) {
        return connectorTableServiceProxy.getWithMetadataLocationOnly(name, getTableServiceParameters, useCache);
    }
    if (getTableServiceParameters.isIncludeMetadataFromConnector()) {
        return connectorTableServiceProxy.getWithInfoDetails(name, getTableServiceParameters, useCache);
    }
    return connectorTableServiceProxy.get(name, getTableServiceParameters, useCache);
}
// Best-effort delete of the storage table backing a common view; failures are
// logged and counted but never propagated to the view delete itself.
private void deleteCommonViewStorageTable(final QualifiedName viewName,
final QualifiedName storageTableName) {
try {
log.warn("Deleting storage table: {} belonging to common view: {}",
storageTableName, viewName);
deleteAndReturn(storageTableName, false);
} catch (Exception e) {
// For now only register failures to drop
handleException(storageTableName, true, "deleteCommonViewStorageTable", e);
}
}
/**
 * {@inheritDoc}
 */
@Override
public boolean exists(final QualifiedName name) {
return connectorTableServiceProxy.exists(name);
}
/**
 * Validates that the given name is non-null and refers to a table.
 *
 * @param name qualified name to validate
 * @throws IllegalArgumentException if the name does not refer to a table
 */
private void validate(final QualifiedName name) {
    Preconditions.checkNotNull(name, "name cannot be null");
    // Guava Preconditions interpolate with %s; the previous "{}" (SLF4J-style)
    // placeholder was never substituted, so the offending name was missing from
    // the error message.
    Preconditions.checkArgument(name.isTableDefinition(), "Definition %s does not refer to a table", name);
}
}
| 2,143 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes metacat service implementation classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.impl;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,144 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/init/MetacatThriftInitService.java
|
package com.netflix.metacat.main.services.init;
import com.google.common.base.Throwables;
import com.netflix.metacat.main.services.MetacatThriftService;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * Inits the thrift service on top of the core services.
 */
@Slf4j
@Getter
@Accessors(fluent = true)
@RequiredArgsConstructor
public class MetacatThriftInitService {
    @NonNull
    private final MetacatThriftService metacatThriftService;
    @NonNull
    private final MetacatCoreInitService coreInitService;
    // Tracks whether the thrift endpoint is up; AtomicBoolean defaults to false.
    private final AtomicBoolean thriftStarted = new AtomicBoolean();

    /**
     * Metacat service shutdown: stops the thrift endpoint first, then core services.
     */
    public void stop() {
        log.info("Metacat application is stopped. Stopping services.");
        try {
            this.metacatThriftService.stop();
            this.thriftStarted.set(false);
            this.coreInitService.stop();
        } catch (final Exception e) {
            // Just log it since we're shutting down anyway; no point propagating it.
            log.error("Unable to properly shutdown services due to {}", e.getMessage(), e);
        }
        log.info("Finished stopping services.");
    }

    /**
     * Metacat service initialization: starts core services first, then the thrift
     * endpoint. Any failure is rethrown (unchecked as-is, checked wrapped).
     */
    public void start() {
        log.info("Metacat application starting. Starting internal services...");
        try {
            // TODO: Rather than doing this statically why don't we have things that need to be started implement
            // some interface/order?
            this.coreInitService.start();
            this.metacatThriftService.start();
            this.thriftStarted.set(true);
        } catch (final Exception e) {
            log.error("Unable to initialize services due to {}", e.getMessage(), e);
            // Throwables.propagate is deprecated; this is Guava's documented replacement.
            Throwables.throwIfUnchecked(e);
            throw new RuntimeException(e);
        }
        log.info("Finished starting internal services.");
    }
}
| 2,145 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/init/MetacatCoreInitService.java
|
package com.netflix.metacat.main.services.init;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.main.manager.CatalogManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.manager.PluginManager;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationContext;
/**
* Inits the core catalog related dependencies.
*/
@Slf4j
@Getter
@Accessors(fluent = true)
@RequiredArgsConstructor
public class MetacatCoreInitService {
    @NonNull
    private final PluginManager pluginManager;
    @NonNull
    private final CatalogManager catalogManager;
    @NonNull
    private final ConnectorManager connectorManager;
    @NonNull
    private final ThreadServiceManager threadServiceManager;
    @NonNull
    private final ApplicationContext applicationContext;

    /**
     * Shuts down the connector manager and the thread service manager.
     */
    public void stop() {
        log.info("Metacat application is stopped. Stopping services.");
        connectorManager.stop();
        threadServiceManager.stop();
    }

    /**
     * Loads the connector plugins and then the configured catalogs.
     *
     * @throws Exception if an error occurs during initialization.
     */
    public void start() throws Exception {
        log.info("Metacat application starting. Starting internal services...");
        pluginManager.loadPlugins();
        catalogManager.loadCatalogs(applicationContext);
    }
}
| 2,146 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/init/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat initialization services.
*/
@javax.annotation.ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.init;
| 2,147 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/health/MetacatHealthIndicator.java
|
package com.netflix.metacat.main.services.health;
import com.netflix.metacat.main.services.init.MetacatCoreInitService;
import com.netflix.metacat.main.services.init.MetacatThriftInitService;
import lombok.RequiredArgsConstructor;
import org.apache.thrift.transport.TSocket;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
/**
* Metacat health indicator.
*/
@RequiredArgsConstructor
public class MetacatHealthIndicator implements HealthIndicator {
    protected static final String PLUGIN_KEY = "pluginsLoaded";
    protected static final String CATALOG_KEY = "catalogsLoaded";
    protected static final String THRIFT_KEY = "thriftStarted";
    private final MetacatCoreInitService coreInitService;
    private final MetacatThriftInitService thriftInitService;
    /**
     * {@inheritDoc}
     *
     * <p>Reports UP only when the plugins and catalogs are loaded and every catalog
     * thrift port accepts a TCP connection; otherwise reports OUT_OF_SERVICE.
     */
    @Override
    public Health health() {
        final boolean plugins = coreInitService.pluginManager().arePluginsLoaded();
        final boolean catalogs = coreInitService.catalogManager().areCatalogsLoaded();
        // allMatch replaces the previous reduce(Boolean.TRUE, Boolean::equals), which is
        // not a logical AND (e.g. two failed probes cancelled out and reported healthy)
        // and is not associative, which parallel reduction requires.
        final boolean thrift = thriftInitService.thriftStarted().get()
            && thriftInitService.metacatThriftService()
            .getCatalogThriftServices().parallelStream().allMatch(c -> {
                TSocket transport = null;
                try {
                    // 100ms connect timeout so a hung port does not stall the health check.
                    transport = new TSocket("localhost", c.getPortNumber(), 100);
                    transport.open();
                } catch (Exception e) {
                    return false;
                } finally {
                    if (transport != null && transport.isOpen()) {
                        transport.close();
                    }
                }
                return true;
            });
        final Health.Builder builder = plugins && catalogs && thrift ? Health.up() : Health.outOfService();
        builder.withDetail(PLUGIN_KEY, plugins);
        builder.withDetail(CATALOG_KEY, catalogs);
        builder.withDetail(THRIFT_KEY, thrift);
        return builder.build();
    }
}
| 2,148 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/health/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat service health related classes.
*/
@javax.annotation.ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.health;
| 2,149 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchEventHandlers.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.common.server.events.AsyncListener;
import com.netflix.metacat.common.server.events.MetacatCreateDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Event handlers for elastic search indexing.
*/
@Slf4j
@AsyncListener
public class ElasticSearchEventHandlers {
    private final ElasticSearchUtil es;
    private final MetacatJsonLocator metacatJsonLocator;
    private final Config config;
    // One pair of timers per event type: time from request timestamp to handler entry
    // (the "events delay"), and time spent performing the index operation itself.
    private final Timer databaseCreateEventsDelayTimer;
    private final Timer databaseCreateTimer;
    private final Timer tableCreateEventsDelayTimer;
    private final Timer tableCreateTimer;
    private final Timer databaseDeleteEventsDelayTimer;
    private final Timer databaseDeleteTimer;
    private final Timer tableDeleteEventsDelayTimer;
    private final Timer tableDeleteTimer;
    private final Timer partitionDeleteEventsDelayTimer;
    private final Timer partitionDeleteTimer;
    private final Timer tableRenameEventsDelayTimer;
    private final Timer tableRenameTimer;
    private final Timer tableUpdateEventsDelayTimer;
    private final Timer tableUpdateTimer;
    private final Timer partitionSaveEventsDelayTimer;
    private final Timer partitionSaveTimer;
    /**
     * Constructor.
     *
     * @param es elastic search util
     * @param registry registry to spectator
     * @param config configurations
     */
    public ElasticSearchEventHandlers(final ElasticSearchUtil es,
                                      final Registry registry,
                                      final Config config) {
        this.es = es;
        this.metacatJsonLocator = new MetacatJsonLocator();
        this.config = config;
        this.databaseCreateEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "database.create");
        this.databaseCreateTimer = registry.timer(Metrics.TimerElasticSearchDatabaseCreate.getMetricName());
        this.tableCreateEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "table.create");
        this.tableCreateTimer = registry.timer(Metrics.TimerElasticSearchTableCreate.getMetricName());
        this.databaseDeleteEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "database.delete");
        this.databaseDeleteTimer = registry.timer(Metrics.TimerElasticSearchDatabaseDelete.getMetricName());
        this.tableDeleteEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "table.delete");
        this.tableDeleteTimer = registry.timer(Metrics.TimerElasticSearchTableDelete.getMetricName());
        this.partitionDeleteEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "partition.delete");
        this.partitionDeleteTimer = registry.timer(Metrics.TimerElasticSearchPartitionDelete.getMetricName());
        this.tableRenameEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "table.rename");
        this.tableRenameTimer = registry.timer(Metrics.TimerElasticSearchTableRename.getMetricName());
        this.tableUpdateEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "table.update");
        this.tableUpdateTimer = registry.timer(Metrics.TimerElasticSearchTableUpdate.getMetricName());
        this.partitionSaveEventsDelayTimer = registry.timer(Metrics.TimerElasticSearchEventsDelay.getMetricName(),
            Metrics.TagEventsType.getMetricName(), "partition.save");
        this.partitionSaveTimer = registry.timer(Metrics.TimerElasticSearchPartitionSave.getMetricName());
    }
    /**
     * Indexes the newly created database.
     *
     * @param event event
     */
    @EventListener
    public void metacatCreateDatabasePostEventHandler(final MetacatCreateDatabasePostEvent event) {
        log.debug("Received CreateDatabaseEvent {}", event);
        this.databaseCreateEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        this.databaseCreateTimer.record(() -> {
            final DatabaseDto dto = event.getDatabase();
            final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
                event.getRequestContext().getUserName(), false);
            es.save(ElasticSearchDoc.Type.database.name(), doc.getId(), doc);
        });
    }
    /**
     * Indexes the newly created table.
     *
     * @param event event
     */
    @EventListener
    public void metacatCreateTablePostEventHandler(final MetacatCreateTablePostEvent event) {
        log.debug("Received CreateTableEvent {}", event);
        this.tableCreateEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        this.tableCreateTimer.record(() -> {
            final TableDto dto = event.getTable();
            final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
                event.getRequestContext().getUserName(), false);
            es.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
        });
    }
    /**
     * Soft-deletes the dropped database document.
     *
     * @param event event
     */
    @EventListener
    public void metacatDeleteDatabasePostEventHandler(final MetacatDeleteDatabasePostEvent event) {
        log.debug("Received DeleteDatabaseEvent {}", event);
        this.databaseDeleteEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        this.databaseDeleteTimer.record(() -> {
            final DatabaseDto dto = event.getDatabase();
            es.softDelete(ElasticSearchDoc.Type.database.name(), dto.getName().toString(), event.getRequestContext());
        });
    }
    /**
     * Soft-deletes the dropped table document and, if partition publishing is enabled,
     * hard-deletes the partition documents that belonged to it.
     *
     * @param event event
     */
    @EventListener
    public void metacatDeleteTablePostEventHandler(final MetacatDeleteTablePostEvent event) {
        log.debug("Received DeleteTableEvent {}", event);
        this.tableDeleteEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        this.tableDeleteTimer.record(() -> {
            final TableDto dto = event.getTable();
            es.softDelete(ElasticSearchDoc.Type.table.name(), dto.getName().toString(), event.getRequestContext());
            if (config.isElasticSearchPublishPartitionEnabled()) {
                try {
                    final List<String> partitionIdsToBeDeleted =
                        es.getIdsByQualifiedName(ElasticSearchDoc.Type.partition.name(), dto.getName());
                    es.delete(ElasticSearchDoc.Type.partition.name(), partitionIdsToBeDeleted);
                } catch (Exception e) {
                    // Best effort: the table delete already succeeded. Include the exception
                    // so the stack trace is logged instead of being silently dropped.
                    log.warn("Failed deleting the partitions for the dropped table/view:{}", dto.getName(), e);
                }
            }
        });
    }
    /**
     * Soft-deletes the dropped partition documents, when partition publishing is enabled.
     *
     * @param event event
     */
    @EventListener
    public void metacatDeleteTablePartitionPostEventHandler(final MetacatDeleteTablePartitionPostEvent event) {
        log.debug("Received DeleteTablePartitionEvent {}", event);
        this.partitionDeleteEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        if (config.isElasticSearchPublishPartitionEnabled()) {
            this.partitionDeleteTimer.record(() -> {
                final List<String> partitionIds = event.getPartitionIds();
                // ES partition doc ids are qualified as "<table name>/<partition id>".
                final List<String> esPartitionIds = partitionIds.stream()
                    .map(partitionId -> event.getName().toString() + "/" + partitionId).collect(Collectors.toList());
                es.softDelete(ElasticSearchDoc.Type.partition.name(), esPartitionIds, event.getRequestContext());
            });
        }
    }
    /**
     * Deletes the document under the old name and indexes it under the new name.
     *
     * @param event event
     */
    @EventListener
    public void metacatRenameTablePostEventHandler(final MetacatRenameTablePostEvent event) {
        log.debug("Received RenameTableEvent {}", event);
        this.tableRenameEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        this.tableRenameTimer.record(() -> {
            es.delete(ElasticSearchDoc.Type.table.name(), event.getName().toString());
            final TableDto dto = event.getCurrentTable();
            final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
                event.getRequestContext().getUserName(), false);
            es.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
        });
    }
    /**
     * Re-indexes the updated table and, when configured and the data metadata changed,
     * propagates the data metadata to other tables sharing the same uri.
     *
     * @param event event
     */
    @EventListener
    public void metacatUpdateTablePostEventHandler(final MetacatUpdateTablePostEvent event) {
        log.debug("Received UpdateTableEvent {}", event);
        this.tableUpdateEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        this.tableUpdateTimer.record(() -> {
            final TableDto dto = event.getCurrentTable();
            final ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
                event.getRequestContext().getUserName(), false);
            // Read the previous doc first so we only fan out when data metadata changed.
            final ElasticSearchDoc oldDoc = es.get(ElasticSearchDoc.Type.table.name(), doc.getId());
            es.save(ElasticSearchDoc.Type.table.name(), doc.getId(), doc);
            if (config.isElasticSearchUpdateTablesWithSameUriEnabled()
                && (oldDoc == null || oldDoc.getDto() == null
                || !Objects.equals(((TableDto) oldDoc.getDto()).getDataMetadata(), dto.getDataMetadata()))) {
                updateEntitiesWithSameUri(ElasticSearchDoc.Type.table.name(),
                    dto, event.getRequestContext().getUserName());
            }
        });
    }
    /**
     * Partially updates all other table docs that share this table's data uri with the
     * new data metadata. Only applies to tables whose data is external.
     */
    private void updateEntitiesWithSameUri(final String metadataType, final TableDto dto,
                                           final String userName) {
        if (dto.isDataExternal()) {
            final List<String> ids = es.getTableIdsByUri(metadataType, dto.getDataUri())
                .stream().filter(s -> !s.equals(dto.getName().toString())).collect(Collectors.toList());
            if (!ids.isEmpty()) {
                log.info("ElasticSearch table updates({}) with same uri {} (Table:{})",
                    ids.size(), dto.getDataUri(), dto.getName());
                final ObjectNode node = metacatJsonLocator.emptyObjectNode();
                node.set(ElasticSearchDoc.Field.DATA_METADATA, dto.getDataMetadata());
                node.put(ElasticSearchDoc.Field.USER, userName);
                node.put(ElasticSearchDoc.Field.TIMESTAMP, java.time.Instant.now().toEpochMilli());
                es.updates(ElasticSearchDoc.Type.table.name(), ids, node);
            }
        }
    }
    /**
     * Indexes the saved partition documents, when partition publishing is enabled.
     *
     * @param event event
     */
    @EventListener
    public void metacatSaveTablePartitionPostEventHandler(final MetacatSaveTablePartitionPostEvent event) {
        log.debug("Received SaveTablePartitionEvent {}", event);
        this.partitionSaveEventsDelayTimer
            .record(System.currentTimeMillis() - event.getRequestContext().getTimestamp(), TimeUnit.MILLISECONDS);
        if (config.isElasticSearchPublishPartitionEnabled()) {
            this.partitionSaveTimer.record(() -> {
                final List<PartitionDto> partitionDtos = event.getPartitions();
                final MetacatRequestContext context = event.getRequestContext();
                final List<ElasticSearchDoc> docs = partitionDtos.stream()
                    .map(dto -> new ElasticSearchDoc(dto.getName().toString(), dto, context.getUserName(), false))
                    .collect(Collectors.toList());
                es.save(ElasticSearchDoc.Type.partition.name(), docs);
            });
        }
    }
}
| 2,150 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtil.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import org.joda.time.Instant;
import javax.annotation.Nullable;
import java.util.List;
/**
* Utility class for index, update, delete metacat doc from elastic search.
*/
public interface ElasticSearchUtil {
    /**
     * Delete all the records for the given type.
     *
     * @param metacatRequestContext context containing the user name
     * @param type doc type
     * @param softDelete if true, marks the docs for deletion instead of removing them
     */
    void delete(MetacatRequestContext metacatRequestContext, String type,
                boolean softDelete);
    /**
     * Hard-delete index documents by id.
     *
     * @param type index type
     * @param ids entity ids
     */
    void delete(String type, List<String> ids);
    /**
     * Hard-delete a single index document.
     *
     * @param type index type
     * @param id entity id
     */
    void delete(String type, String id);
    /**
     * Gets the document for the given type and id.
     *
     * @param type doc type
     * @param id doc id
     * @return doc
     */
    ElasticSearchDoc get(String type, String id);
    /**
     * Gets the document for the given type and id from a specific index.
     *
     * @param type doc type
     * @param id doc id
     * @param index the es index
     * @return doc
     */
    ElasticSearchDoc get(String type, String id, String index);
    /**
     * List of document ids under the given qualified name.
     *
     * @param type type
     * @param qualifiedName name
     * @return list of names
     */
    List<String> getIdsByQualifiedName(String type, QualifiedName qualifiedName);
    /**
     * Search the documents by names and by the given marker.
     *
     * @param type type
     * @param qualifiedNames names
     * @param marker marker
     * @param excludeQualifiedNames exclude names
     * @param valueType dto type
     * @param <T> dto type
     * @return dto
     */
    <T> List<T> getQualifiedNamesByMarkerByNames(String type, List<QualifiedName> qualifiedNames,
                                                 Instant marker,
                                                 List<QualifiedName> excludeQualifiedNames,
                                                 Class<T> valueType);
    /**
     * List table names under the given catalogs.
     *
     * @param type doc type
     * @param qualifiedNames names
     * @param excludeQualifiedNames exclude names
     * @return list of table names
     */
    List<String> getTableIdsByCatalogs(String type, List<QualifiedName> qualifiedNames,
                                       List<QualifiedName> excludeQualifiedNames);
    /**
     * List table names by data uri.
     *
     * @param type doc type
     * @param dataUri uri
     * @return list of table names
     */
    List<String> getTableIdsByUri(String type, String dataUri);
    /**
     * Wrapper for logging the message in elastic search esIndex.
     *
     * @param method method
     * @param type type
     * @param name name
     * @param data data
     * @param logMessage message
     * @param ex exception
     * @param error is an error
     */
    void log(String method, String type, String name, @Nullable String data,
             String logMessage, @Nullable Exception ex, boolean error);
    /**
     * Elastic search index refresh.
     */
    void refresh();
    /**
     * Bulk save of the entities.
     *
     * @param type index type
     * @param docs metacat documents
     */
    void save(String type, List<ElasticSearchDoc> docs);
    /**
     * Save of a single entity.
     *
     * @param type index type
     * @param id id of the entity
     * @param doc metacat documents
     */
    void save(String type, String id, ElasticSearchDoc doc);
    /**
     * Full text search.
     *
     * @param searchString search text
     * @return list of table info
     */
    List<TableDto> simpleSearch(String searchString);
    /**
     * Marks the documents as deleted (soft delete).
     *
     * @param type index type
     * @param ids list of entity ids
     * @param metacatRequestContext context containing the user name
     */
    void softDelete(String type, List<String> ids,
                    MetacatRequestContext metacatRequestContext);
    /**
     * Marks the document as deleted (soft delete).
     *
     * @param type index type
     * @param id entity id
     * @param metacatRequestContext context containing the user name
     */
    void softDelete(String type, String id, MetacatRequestContext metacatRequestContext);
    /**
     * Creates JSON from elasticSearchdoc object.
     *
     * @param elasticSearchDoc elastic search doc.
     * @return Json String
     */
    String toJsonString(ElasticSearchDoc elasticSearchDoc);
    /**
     * Updates the documents with partial updates with the given fields.
     *
     * @param type index type
     * @param ids list of entity ids
     * @param node Object node to update the doc
     */
    void updates(String type, List<String> ids, ObjectNode node);
}
| 2,151 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtilImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import org.elasticsearch.transport.TransportException;
import org.joda.time.Instant;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Utility class for index, update, delete metacat doc from elastic search.
*/
@Slf4j
public class ElasticSearchUtilImpl implements ElasticSearchUtil {
private static final Retryer<Void> RETRY_ES_PUBLISH = RetryerBuilder.<Void>newBuilder()
.retryIfExceptionOfType(FailedNodeException.class)
.retryIfExceptionOfType(NodeClosedException.class)
.retryIfExceptionOfType(NoNodeAvailableException.class)
.retryIfExceptionOfType(ReceiveTimeoutTransportException.class)
.retryIfExceptionOfType(TransportException.class)
.retryIfExceptionOfType(ElasticsearchTimeoutException.class)
.retryIfExceptionOfType(EsRejectedExecutionException.class)
.retryIfExceptionOfType(CancellableThreads.ExecutionCancelledException.class)
.withWaitStrategy(WaitStrategies.incrementingWait(10, TimeUnit.MILLISECONDS, 30, TimeUnit.MILLISECONDS))
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
private static final int NO_OF_CONFLICT_RETRIES = 3;
private final Client client;
private final String esIndex;
private final Config config;
private final MetacatJson metacatJson;
private XContentType contentType = Requests.INDEX_CONTENT_TYPE;
private final Registry registry;
private final TimeValue esCallTimeout;
private final TimeValue esBulkCallTimeout;
    /**
     * Constructor.
     *
     * @param client elastic search client; may be null when elastic search is disabled
     * @param config config
     * @param metacatJson json utility
     * @param registry spectator registry
     */
    public ElasticSearchUtilImpl(
        @Nullable final Client client,
        final Config config,
        final MetacatJson metacatJson,
        final Registry registry) {
        this.config = config;
        this.client = client;
        this.metacatJson = metacatJson;
        // Index name and call timeouts are read once at construction time.
        this.esIndex = config.getEsIndex();
        this.registry = registry;
        this.esCallTimeout = TimeValue.timeValueSeconds(config.getElasticSearchCallTimeout());
        this.esBulkCallTimeout = TimeValue.timeValueSeconds(config.getElasticSearchBulkCallTimeout());
    }
/**
* {@inheritDoc}
*/
@Override
public void delete(final String type, final String id) {
try {
RETRY_ES_PUBLISH.call(() -> {
client.prepareDelete(esIndex, type, id).execute().actionGet(esCallTimeout);
return null;
});
} catch (Exception e) {
handleException("ElasticSearchUtil.delete", type, id, e,
Metrics.CounterElasticSearchDelete.getMetricName());
}
}
private void handleException(final String request,
final String type,
final String id,
final Exception exception,
final String metricName) {
log.error("Failed {} metadata of type {} with id {}. {}", request, type, id, exception);
String exceptionName = exception.getClass().getSimpleName();
if (exception instanceof RetryException) {
final Throwable error = ((RetryException) exception).getLastFailedAttempt().getExceptionCause();
if (error != null) {
exceptionName = error.getClass().getSimpleName();
}
}
final Map<String, String> tags = ImmutableMap
.<String, String>builder().put("status", "failure").put("name", id).put("exception", exceptionName).build();
registry.counter(registry.createId(metricName).withTags(tags)).increment();
log(request, type, id, null, exception.getMessage(), exception, true);
}
private void handleException(final String request,
final String type,
final List<String> ids,
final Exception exception,
final String metricName) {
log.error("Failed {} metadata of type {} with ids {}. {}", request, type, ids, exception);
String exceptionName = exception.getClass().getSimpleName();
if (exception instanceof RetryException) {
final Throwable error = ((RetryException) exception).getLastFailedAttempt().getExceptionCause();
if (error != null) {
exceptionName = error.getClass().getSimpleName();
}
}
final Map<String, String> tags = ImmutableMap
.<String, String>builder().put("status", "failure").put("exception", exceptionName).build();
registry.counter(registry.createId(metricName).withTags(tags)).increment();
log(request, type, ids.toString(), null, exception.getMessage(), exception, true);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final String type, final List<String> ids) {
if (ids != null && !ids.isEmpty()) {
final List<List<String>> partitionedIds = Lists.partition(ids, 10000);
partitionedIds.forEach(subIds -> hardDeleteDoc(type, subIds));
}
}
    /**
     * {@inheritDoc}
     *
     * <p>Marks the single document as deleted by setting the deleted flag, timestamp and
     * user fields, retrying on transient failures, then copies the doc to the migration
     * index. A final failure is counted and logged rather than rethrown.
     */
    @Override
    public void softDelete(final String type, final String id, final MetacatRequestContext metacatRequestContext) {
        try {
            RETRY_ES_PUBLISH.call(() -> {
                final XContentBuilder builder = XContentFactory.contentBuilder(contentType);
                builder.startObject().field(ElasticSearchDoc.Field.DELETED, true)
                    .field(ElasticSearchDoc.Field.TIMESTAMP, java.time.Instant.now().toEpochMilli())
                    .field(ElasticSearchDoc.Field.USER,
                        metacatRequestContext.getUserName()).endObject();
                client.prepareUpdate(esIndex, type, id)
                    .setRetryOnConflict(NO_OF_CONFLICT_RETRIES).setDoc(builder).get(esCallTimeout);
                ensureMigrationByCopy(type, Collections.singletonList(id));
                return null;
            });
        } catch (Exception e) {
            handleException("ElasticSearchUtil.softDelete", type, id, e,
                Metrics.CounterElasticSearchDelete.getMetricName());
        }
    }
/**
 * Soft deletes the documents with the given ids in batches of 100, then mirrors each batch
 * into the merge index when migration mode is active.
 *
 * @param type                  index type
 * @param ids                   document ids; no-op when null or empty
 * @param metacatRequestContext request context supplying the user performing the delete
 */
@Override
public void softDelete(final String type, final List<String> ids,
                       final MetacatRequestContext metacatRequestContext) {
    if (ids == null || ids.isEmpty()) {
        return;
    }
    final List<List<String>> batches = Lists.partition(ids, 100);
    for (final List<String> batch : batches) {
        softDeleteDoc(type, batch, metacatRequestContext);
    }
    for (final List<String> batch : batches) {
        ensureMigrationByCopy(type, batch);
    }
}
/**
 * Applies the given partial document to every id, in batches of 100, then mirrors each batch
 * into the merge index when migration mode is active.
 *
 * @param type index type
 * @param ids  document ids; no-op when null or empty
 * @param node partial document to merge into each target document
 */
@Override
public void updates(final String type, final List<String> ids, final ObjectNode node) {
    if (ids == null || ids.isEmpty()) {
        return;
    }
    final List<List<String>> batches = Lists.partition(ids, 100);
    for (final List<String> batch : batches) {
        updateDocs(type, batch, node);
    }
    for (final List<String> batch : batches) {
        ensureMigrationByCopy(type, batch);
    }
}
/**
 * Bulk-updates the given ids with the supplied partial document, retrying the whole bulk call
 * on transient failures and reporting per-item failures individually.
 *
 * @param type index type
 * @param ids  document ids to update
 * @param node partial document applied to each target
 */
private void updateDocs(final String type, final List<String> ids, final ObjectNode node) {
    try {
        RETRY_ES_PUBLISH.call(() -> {
            final BulkRequestBuilder bulkRequest = client.prepareBulk();
            ids.forEach(id -> {
                bulkRequest.add(client.prepareUpdate(esIndex, type, id)
                    .setRetryOnConflict(NO_OF_CONFLICT_RETRIES)
                    .setDoc(metacatJson.toJsonAsBytes(node), XContentType.JSON));
            });
            final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
            if (bulkResponse.hasFailures()) {
                // The bulk call as a whole succeeded; record each failed item separately.
                for (BulkItemResponse item : bulkResponse.getItems()) {
                    if (item.isFailed()) {
                        handleException("ElasticSearchUtil.updateDocs.item", type, item.getId(),
                            item.getFailure().getCause(), Metrics.CounterElasticSearchUpdate.getMetricName());
                    }
                }
            }
            return null;
        });
    } catch (Exception e) {
        // Fixed typo in the operation label: was "ElasticSearchUtil.updatDocs".
        handleException("ElasticSearchUtil.updateDocs", type, ids, e,
            Metrics.CounterElasticSearchBulkUpdate.getMetricName());
    }
}
/**
 * {@inheritDoc}
 */
@Override
public void save(final String type, final String id, final ElasticSearchDoc doc) {
    // Index into the primary index, then mirror into the merge index if migration is active.
    saveToIndex(type, id, doc, esIndex);
    ensureMigrationByCopy(type, Collections.singletonList(id));
}
/**
 * Bulk-saves the given documents in batches of 100, then mirrors each batch into the merge
 * index when migration mode is active.
 *
 * @param type index type
 * @param docs documents to index; no-op when null or empty
 */
@Override
public void save(final String type, final List<ElasticSearchDoc> docs) {
    if (docs == null || docs.isEmpty()) {
        return;
    }
    final List<List<ElasticSearchDoc>> batches = Lists.partition(docs, 100);
    for (final List<ElasticSearchDoc> batch : batches) {
        bulkSaveToIndex(type, batch, esIndex);
    }
    for (final List<ElasticSearchDoc> batch : batches) {
        ensureMigrationBySave(type, batch);
    }
}
/**
 * {@inheritDoc}
 */
@Override
public String toJsonString(final ElasticSearchDoc elasticSearchDoc) {
    final String result = metacatJson.toJsonString(toJsonObject(elasticSearchDoc));
    // NOTE(review): replaces every literal "{}" substring with "null" in the serialized JSON,
    // presumably to avoid indexing empty objects — confirm this cannot mangle string field
    // values that legitimately contain "{}".
    return result.replace("{}", "null");
}
/**
 * Converts the elastic search doc wrapper into the JSON object that is indexed: the DTO's
 * fields plus searchable definition metadata and the bookkeeping fields (timestamp, deleted
 * flag, user, optional refresh marker).
 *
 * @param elasticSearchDoc doc wrapper to serialize
 * @return JSON object ready for indexing
 */
private ObjectNode toJsonObject(final ElasticSearchDoc elasticSearchDoc) {
    final ObjectNode oMetadata = metacatJson.toJsonObject(elasticSearchDoc.getDto());
    //add the searchable definition metadata
    elasticSearchDoc.addSearchableDefinitionMetadata(oMetadata);
    //Adding the timestamp explicitly
    oMetadata.put(ElasticSearchDoc.Field.TIMESTAMP, elasticSearchDoc.getTimestamp());
    //True if this entity has been deleted
    oMetadata.put(ElasticSearchDoc.Field.DELETED, elasticSearchDoc.isDeleted());
    //User that last touched this entity
    oMetadata.put(ElasticSearchDoc.Field.USER, elasticSearchDoc.getUser());
    // Refresh marker is only present for docs written during a catalog refresh/traversal run.
    if (elasticSearchDoc.getRefreshMarker() != null) {
        oMetadata.put(ElasticSearchDoc.Field.REFRESH_MARKER, elasticSearchDoc.getRefreshMarker());
    }
    return oMetadata;
}
/**
 * Looks up the ids of all table documents whose serde URI exactly matches the given data URI.
 *
 * @param type    index type
 * @param dataUri data location URI; when null an empty list is returned without querying
 * @return matching document ids (possibly empty, never null)
 */
@Override
public List<String> getTableIdsByUri(final String type, final String dataUri) {
    if (dataUri == null) {
        return Lists.newArrayList();
    }
    // Query by exact URI; only ids are needed so source fetching is disabled.
    final SearchResponse response = client.prepareSearch(esIndex)
        .setTypes(type)
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setQuery(QueryBuilders.termQuery("serde.uri", dataUri))
        .setSize(Integer.MAX_VALUE)
        .setFetchSource(false)
        .execute()
        .actionGet(esCallTimeout);
    return response.getHits().getHits().length == 0 ? Lists.newArrayList() : getIds(response);
}
/**
 * {@inheritDoc}
 */
@Override
public List<String> getTableIdsByCatalogs(final String type, final List<QualifiedName> qualifiedNames,
                                          final List<QualifiedName> excludeQualifiedNames) {
    List<String> ids = Lists.newArrayList();
    // Match non-deleted docs under the included names while excluding the excluded names.
    // NOTE(review): this passes QualifiedName objects straight into termsQuery, whereas
    // getQualifiedNamesByMarkerByNames maps them to strings first — confirm both serialize
    // to the same term values.
    final QueryBuilder queryBuilder = QueryBuilders.boolQuery()
        .must(QueryBuilders.termsQuery("name.qualifiedName.tree", qualifiedNames))
        .must(QueryBuilders.termQuery("deleted_", false))
        .mustNot(QueryBuilders.termsQuery("name.qualifiedName.tree", excludeQualifiedNames));
    // Run the query and get the response.
    final SearchRequestBuilder request = client.prepareSearch(esIndex)
        .setTypes(type)
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setQuery(queryBuilder)
        .setSize(Integer.MAX_VALUE) // TODO May break if too many tables returned back, change to Scroll
        .setFetchSource(false);
    final SearchResponse response = request.execute().actionGet(esCallTimeout);
    if (response.getHits().getHits().length != 0) {
        ids = getIds(response);
    }
    return ids;
}
/**
 * Finds the ids of all non-deleted documents whose qualified-name tree matches the given name.
 *
 * @param type          index type
 * @param qualifiedName qualified name to match
 * @return matching document ids (possibly empty, never null)
 */
@Override
public List<String> getIdsByQualifiedName(final String type, final QualifiedName qualifiedName) {
    // Only non-deleted docs under the given name; ids only, so skip source fetching.
    final QueryBuilder query = QueryBuilders.boolQuery()
        .must(QueryBuilders.termQuery("name.qualifiedName.tree", qualifiedName))
        .must(QueryBuilders.termQuery("deleted_", false));
    final SearchResponse response = client.prepareSearch(esIndex)
        .setTypes(type)
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setQuery(query)
        .setSize(Integer.MAX_VALUE)
        .setFetchSource(false)
        .execute()
        .actionGet(esCallTimeout);
    return response.getHits().getHits().length == 0 ? Lists.newArrayList() : getIds(response);
}
/**
 * {@inheritDoc}
 */
@Override
public <T> List<T> getQualifiedNamesByMarkerByNames(final String type,
                                                    final List<QualifiedName> qualifiedNames,
                                                    final Instant marker,
                                                    final List<QualifiedName> excludeQualifiedNames,
                                                    final Class<T> valueType) {
    final List<T> result = Lists.newArrayList();
    // Terms queries need plain strings, so map qualified names to their string form.
    final List<String> names = qualifiedNames.stream().map(QualifiedName::toString).collect(Collectors.toList());
    final List<String> excludeNames = excludeQualifiedNames.stream().map(QualifiedName::toString)
        .collect(Collectors.toList());
    //
    // Run the query and get the response.
    // Selects docs under the included names that are not deleted, were last written at or
    // before the marker time, and were NOT stamped with this refresh run's marker — i.e.
    // docs a refresh did not touch ("unmarked" entities).
    final QueryBuilder queryBuilder = QueryBuilders.boolQuery()
        .must(QueryBuilders.termsQuery("name.qualifiedName.tree", names))
        .must(QueryBuilders.termQuery("deleted_", false))
        .must(QueryBuilders.rangeQuery(ElasticSearchDoc.Field.TIMESTAMP).lte(marker.getMillis()))
        .mustNot(QueryBuilders.termsQuery("name.qualifiedName.tree", excludeNames))
        .mustNot(QueryBuilders.termQuery("refreshMarker_", marker.toString()));
    final SearchRequestBuilder request = client.prepareSearch(esIndex)
        .setTypes(type)
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setQuery(queryBuilder)
        .setSize(Integer.MAX_VALUE);
    final SearchResponse response = request.execute().actionGet(esCallTimeout);
    if (response.getHits().getHits().length != 0) {
        result.addAll(parseResponse(response, valueType));
    }
    return result;
}
/**
 * {@inheritDoc}
 */
@Override
public void refresh() {
    // Forces an index refresh so recent writes become visible to subsequent searches.
    client.admin().indices().refresh(new RefreshRequest(esIndex)).actionGet();
}
/**
 * {@inheritDoc}
 */
@Override
public ElasticSearchDoc get(final String type, final String id) {
    // Delegates to the index-aware overload using the primary metacat index.
    return get(type, id, esIndex);
}
/**
 * Fetches a single document by id from the given index.
 *
 * @param type  index type
 * @param id    document id
 * @param index index to read from
 * @return the parsed document, or null when it does not exist
 */
@Override
public ElasticSearchDoc get(final String type, final String id, final String index) {
    final GetResponse response = client.prepareGet(index, type, id).execute().actionGet(esCallTimeout);
    return response.isExists() ? parse(response) : null;
}
/**
 * Deletes (hard or soft) every document of the given type by scrolling through the index.
 *
 * @param metacatRequestContext request context supplying the user performing the delete
 * @param type                  index type whose documents are removed
 * @param softDelete            true to mark documents deleted, false to remove them
 */
@Override
public void delete(final MetacatRequestContext metacatRequestContext, final String type,
                   final boolean softDelete) {
    SearchResponse response = client.prepareSearch(esIndex)
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setScroll(new TimeValue(config.getElasticSearchScrollTimeout()))
        .setSize(config.getElasticSearchScrollFetchSize())
        .setQuery(QueryBuilders.termQuery("_type", type))
        .setFetchSource(false)
        .execute()
        .actionGet(esCallTimeout);
    while (true) {
        //Break condition: No hits are returned
        if (response.getHits().getHits().length == 0) {
            break;
        }
        // Process the current page BEFORE scrolling. The previous version scrolled first,
        // which silently discarded the first page of hits returned by the initial search.
        final List<String> ids = getIds(response);
        if (softDelete) {
            softDelete(type, ids, metacatRequestContext);
        } else {
            delete(type, ids);
        }
        response = client.prepareSearchScroll(response.getScrollId())
            .setScroll(new TimeValue(config.getElasticSearchScrollTimeout())).execute().actionGet(esCallTimeout);
    }
}
/**
 * {@inheritDoc}
 */
@Override
public void log(final String method, final String type, final String name, @Nullable final String data,
                final String logMessage, @Nullable final Exception ex, final boolean error) {
    // Delegates to the index-aware overload, targeting the primary metacat index.
    log(method, type, name, data, logMessage, ex, error, esIndex);
}
/**
 * Log the message in elastic search.
 *
 * @param method     method
 * @param type       type
 * @param name       name
 * @param data       data
 * @param logMessage message
 * @param ex         exception (may be null when logging a non-error event)
 * @param error      is an error
 * @param index      es index
 */
private void log(final String method, final String type, final String name, @Nullable final String data,
                 final String logMessage, @Nullable final Exception ex, final boolean error, final String index) {
    if (config.isElasticSearchPublishMetacatLogEnabled()) {
        try {
            final Map<String, Object> source = Maps.newHashMap();
            source.put("method", method);
            source.put("qname", name);
            source.put("type", type);
            source.put("data", data);
            source.put("error", error);
            source.put("message", logMessage);
            // Guard the nullable exception: getStackTraceAsString(null) throws NPE, which
            // previously made every no-exception log call fail (and be swallowed below).
            source.put("details", ex == null ? null : Throwables.getStackTraceAsString(ex));
            client.prepareIndex(index, "metacat-log").setSource(source).execute().actionGet(esCallTimeout);
        } catch (Exception e) {
            // Best-effort logging: count the failure and continue without propagating.
            registry.counter(registry.createId(Metrics.CounterElasticSearchLog.getMetricName())
                .withTags(Metrics.tagStatusFailureMap)).increment();
            log.warn("Failed saving the log message in elastic search for index{} method {}, name {}. Message: {}",
                index, method, name, e.getMessage());
        }
    }
}
/**
 * Full-text search over the "_all" field of table documents.
 *
 * @param searchString term to search for
 * @return matching tables (possibly empty, never null)
 */
@Override
public List<TableDto> simpleSearch(final String searchString) {
    final SearchResponse response = client.prepareSearch(esIndex)
        .setTypes(ElasticSearchDoc.Type.table.name())
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setQuery(QueryBuilders.termQuery("_all", searchString))
        .setSize(Integer.MAX_VALUE)
        .execute()
        .actionGet(esCallTimeout);
    final List<TableDto> tables = Lists.newArrayList();
    if (response.getHits().getHits().length > 0) {
        tables.addAll(parseResponse(response, TableDto.class));
    }
    return tables;
}
/**
 * Permanently delete index documents.
 *
 * @param type index type
 * @param ids  entity ids
 */
private void hardDeleteDoc(final String type, final List<String> ids) {
    try {
        // The whole bulk delete is retried on transient failures.
        RETRY_ES_PUBLISH.call(() -> {
            final BulkRequestBuilder bulkRequest = client.prepareBulk();
            ids.forEach(id -> bulkRequest.add(client.prepareDelete(esIndex, type, id)));
            final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
            log.info("Deleting metadata of type {} with count {}", type, ids.size());
            if (bulkResponse.hasFailures()) {
                // Bulk call succeeded overall; record each failed item separately.
                for (BulkItemResponse item : bulkResponse.getItems()) {
                    if (item.isFailed()) {
                        handleException("ElasticSearchUtil.bulkDelete.item", type, item.getId(),
                            item.getFailure().getCause(), Metrics.CounterElasticSearchDelete.getMetricName());
                    }
                }
            }
            return null;
        });
    } catch (Exception e) {
        handleException("ElasticSearchUtil.bulkDelete", type, ids, e,
            Metrics.CounterElasticSearchBulkDelete.getMetricName());
    }
}
/**
 * Get class from elastic search doc type.
 *
 * @param type type in string
 * @return object class
 */
// NOTE(review): raw Class return type; throws IllegalArgumentException for unknown types
// (Enum.valueOf semantics).
private Class getClass(final String type) {
    return ElasticSearchDoc.Type.valueOf(type).getClazz();
}
/**
 * Converts a GET response into an {@link ElasticSearchDoc}, extracting the bookkeeping fields
 * (user, deleted flag, timestamp) from the source map and deserializing the DTO payload.
 *
 * @param response elastic search GET response
 * @return parsed doc, or null when the document does not exist
 */
private ElasticSearchDoc parse(final GetResponse response) {
    ElasticSearchDoc result = null;
    if (response.isExists()) {
        final Map<String, Object> responseMap = response.getSourceAsMap();
        final String user = (String) responseMap.get(ElasticSearchDoc.Field.USER);
        final boolean deleted = (boolean) responseMap.get(ElasticSearchDoc.Field.DELETED);
        final long timestamp = (long) responseMap.get(ElasticSearchDoc.Field.TIMESTAMP);
        // The DTO class is derived from the doc type (table, database, ...).
        @SuppressWarnings("unchecked") final Object dto = metacatJson.parseJsonValue(
            response.getSourceAsBytes(),
            getClass(response.getType())
        );
        result = new ElasticSearchDoc(response.getId(), dto, user, deleted, timestamp);
    }
    return result;
}
/*
 * Read the documents from the source index and re-index them into the merge index.
 * Ids that no longer resolve to a document are silently skipped.
 * @param type index type
 * @param ids list of doc ids
 */
private void copyDocToMergeIndex(final String type, final List<String> ids) {
    final List<ElasticSearchDoc> existingDocs = Lists.newArrayList();
    for (final String id : ids) {
        final ElasticSearchDoc doc = get(type, id);
        if (doc != null) {
            existingDocs.add(doc);
        }
    }
    bulkSaveToIndex(type, existingDocs, config.getMergeEsIndex());
}
/*
 * When a merge index is configured (migration mode), copy the given docs into it.
 * @param type index type
 * @param ids list of doc ids
 */
private void ensureMigrationByCopy(final String type, final List<String> ids) {
    final String mergeIndex = config.getMergeEsIndex();
    if (Strings.isNullOrEmpty(mergeIndex)) {
        return;
    }
    copyDocToMergeIndex(type, ids);
}
/*
 * When a merge index is configured (migration mode), bulk-save the given docs into it.
 * @param type index type
 * @param docs documents to save
 */
private void ensureMigrationBySave(final String type, final List<ElasticSearchDoc> docs) {
    final String mergeIndex = config.getMergeEsIndex();
    if (Strings.isNullOrEmpty(mergeIndex)) {
        return;
    }
    log.info("Bulk save to mergeEsIndex = {}", mergeIndex);
    bulkSaveToIndex(type, docs, mergeIndex);
}
/* Use elasticSearch bulk API to mark the documents as deleted
 * @param type index type
 * @param ids list of entity ids
 * @param metacatRequestContext context containing the user name
 */
private void softDeleteDoc(
    final String type,
    final List<String> ids,
    final MetacatRequestContext metacatRequestContext) {
    try {
        // Whole bulk update is retried on transient failures.
        RETRY_ES_PUBLISH.call(() -> {
            final BulkRequestBuilder bulkRequest = client.prepareBulk();
            // One shared partial document: deleted flag, delete time, and deleting user.
            final XContentBuilder builder = XContentFactory.contentBuilder(contentType);
            builder.startObject().field(ElasticSearchDoc.Field.DELETED, true)
                .field(ElasticSearchDoc.Field.TIMESTAMP, java.time.Instant.now().toEpochMilli())
                .field(ElasticSearchDoc.Field.USER, metacatRequestContext.getUserName()).endObject();
            ids.forEach(id -> bulkRequest.add(client.prepareUpdate(esIndex, type, id)
                .setRetryOnConflict(NO_OF_CONFLICT_RETRIES).setDoc(builder)));
            final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
            if (bulkResponse.hasFailures()) {
                // Bulk call succeeded overall; record each failed item separately.
                for (BulkItemResponse item : bulkResponse.getItems()) {
                    if (item.isFailed()) {
                        handleException("ElasticSearchUtil.bulkSoftDelete.item", type, item.getId(),
                            item.getFailure().getCause(), Metrics.CounterElasticSearchDelete.getMetricName());
                    }
                }
            }
            return null;
        });
    } catch (Exception e) {
        handleException("ElasticSearchUtil.bulkSoftDelete", type, ids, e,
            Metrics.CounterElasticSearchBulkDelete.getMetricName());
    }
}
/**
 * Save of a single entity to an index.
 *
 * @param type  index type
 * @param id    id of the entity
 * @param doc   source string of the entity
 * @param index the index name
 */
private void saveToIndex(final String type, final String id, final ElasticSearchDoc doc, final String index) {
    try {
        // Retried on transient failures; errors are counted and logged, not propagated.
        RETRY_ES_PUBLISH.call(() -> {
            final IndexRequestBuilder indexRequestBuilder = prepareIndexRequest(index, type, doc);
            if (indexRequestBuilder != null) {
                indexRequestBuilder.execute().actionGet(esCallTimeout);
            }
            return null;
        });
    } catch (Exception e) {
        handleException("ElasticSearchUtil.saveToIndex", type, id, e,
            Metrics.CounterElasticSearchSave.getMetricName());
    }
}
/**
 * Extracts the document ids from a search response, in hit order.
 *
 * @param response search response
 * @return list of document ids (possibly empty, never null)
 */
private static List<String> getIds(final SearchResponse response) {
    final SearchHit[] hits = response.getHits().getHits();
    final List<String> documentIds = Lists.newArrayList();
    for (final SearchHit hit : hits) {
        documentIds.add(hit.getId());
    }
    return documentIds;
}
/**
 * Deserializes every hit in the response into the given DTO type.
 *
 * @param response  search response
 * @param valueType DTO class to deserialize each hit into
 * @param <T>       DTO type
 * @return deserialized DTOs, in hit order
 */
private <T> List<T> parseResponse(final SearchResponse response, final Class<T> valueType) {
    final SearchHit[] hits = response.getHits().getHits();
    final List<T> parsed = Lists.newArrayListWithCapacity(hits.length);
    for (final SearchHit hit : hits) {
        final String source = hit.getSourceAsString();
        try {
            parsed.add(metacatJson.parseJsonValue(source, valueType));
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }
    return parsed;
}
/**
 * Bulk save of the entities.
 *
 * @param type  index type
 * @param docs  metacat documents
 * @param index the index to write into
 */
private void bulkSaveToIndex(final String type, final List<ElasticSearchDoc> docs, final String index) {
    if (docs != null && !docs.isEmpty()) {
        try {
            // Whole bulk save is retried on transient failures.
            RETRY_ES_PUBLISH.call(() -> {
                final BulkRequestBuilder bulkRequest = client.prepareBulk();
                for (ElasticSearchDoc doc : docs) {
                    final IndexRequestBuilder indexRequestBuilder = prepareIndexRequest(index, type, doc);
                    if (indexRequestBuilder != null) {
                        bulkRequest.add(indexRequestBuilder);
                    }
                }
                // Skip the round trip entirely when nothing was added.
                if (bulkRequest.numberOfActions() > 0) {
                    final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
                    log.info("Bulk saving metadata of index {} type {} with size {}.",
                        index, type, docs.size());
                    if (bulkResponse.hasFailures()) {
                        // Bulk call succeeded overall; record each failed item separately.
                        for (BulkItemResponse item : bulkResponse.getItems()) {
                            if (item.isFailed()) {
                                handleException("ElasticSearchUtil.bulkSaveToIndex.index", type, item.getId(),
                                    item.getFailure().getCause(), Metrics.CounterElasticSearchSave.getMetricName());
                            }
                        }
                    }
                }
                return null;
            });
        } catch (Exception e) {
            final List<String> docIds = docs.stream().map(ElasticSearchDoc::getId).collect(Collectors.toList());
            handleException("ElasticSearchUtil.bulkSaveToIndex", type, docIds, e,
                Metrics.CounterElasticSearchBulkSave.getMetricName());
        }
    }
}
/**
 * Builds an index request for the given doc, serializing it via {@link #toJsonString}.
 * Package-private so tests can stub the request construction.
 *
 * @param index target index
 * @param type  index type
 * @param doc   document to index
 * @return the prepared index request
 */
IndexRequestBuilder prepareIndexRequest(final String index,
                                        final String type,
                                        final ElasticSearchDoc doc) {
    return client.prepareIndex(index, type, doc.getId()).setSource(toJsonString(doc), XContentType.JSON);
}
}
| 2,152 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchCatalogTraversalAction.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.main.services.CatalogTraversal;
import com.netflix.metacat.main.services.CatalogTraversalAction;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
/**
 * Catalog traversal action that keeps the elastic search index in sync: databases and tables
 * seen during a traversal are (re)indexed, and at the end of the traversal any indexed entity
 * that was NOT touched by the run ("unmarked") and no longer exists at the source is deleted.
 *
 * @author amajumdar
 */
@Slf4j
public class ElasticSearchCatalogTraversalAction implements CatalogTraversalAction {
    // System configuration (thresholds, flags).
    private final Config config;
    // Used to verify a database still exists before deleting its index entry.
    private final DatabaseService databaseService;
    // Used to verify a table still exists before deleting its index entry.
    private final TableService tableService;
    // Elastic search client wrapper.
    private final ElasticSearchUtil elasticSearchUtil;
    // Deletes definition metadata of removed entities.
    private final UserMetadataService userMetadataService;
    // Deletes tags of removed tables.
    private final TagService tagService;
    // Publishes delete events so other handlers (incl. ES) can react.
    private final MetacatEventBus eventBus;
    // Metrics registry.
    private final Registry registry;
    /**
     * Constructor.
     *
     * @param config              System config
     * @param eventBus            Event bus
     * @param databaseService     Database service
     * @param tableService        Table service
     * @param userMetadataService User metadata service
     * @param tagService          Tag service
     * @param registry            registry of spectator
     * @param elasticSearchUtil   ElasticSearch client wrapper
     */
    public ElasticSearchCatalogTraversalAction(
        @Nonnull @NonNull final Config config,
        @Nonnull @NonNull final MetacatEventBus eventBus,
        @Nonnull @NonNull final DatabaseService databaseService,
        @Nonnull @NonNull final TableService tableService,
        @Nonnull @NonNull final UserMetadataService userMetadataService,
        @Nonnull @NonNull final TagService tagService,
        @Nonnull @NonNull final ElasticSearchUtil elasticSearchUtil,
        @Nonnull @NonNull final Registry registry
    ) {
        this.config = config;
        this.eventBus = eventBus;
        this.databaseService = databaseService;
        this.tableService = tableService;
        this.userMetadataService = userMetadataService;
        this.tagService = tagService;
        this.elasticSearchUtil = elasticSearchUtil;
        this.registry = registry;
    }
    @Override
    public void done(final CatalogTraversal.Context context) {
        // After the traversal completes, remove entities the run did not mark.
        deleteUnmarkedEntities(context);
    }
    /**
     * Finds indexed databases/tables that were not touched by this traversal run, verifies
     * they no longer exist at the source, and deletes them (metadata, tags, index entries).
     * Deletion is skipped entirely when the unmarked count exceeds a configured threshold,
     * as a safety valve against a bad traversal wiping the index.
     */
    private void deleteUnmarkedEntities(final CatalogTraversal.Context context) {
        log.info("Start: Delete unmarked entities");
        //
        // get unmarked qualified names
        // check if it not exists
        // delete
        //
        // Make this run's writes searchable before querying for unmarked docs.
        elasticSearchUtil.refresh();
        final MetacatRequestContext requestContext = MetacatRequestContext.builder().userName("admin").
            clientAppName("metacat-refresh")
            .apiUri("esRefresh")
            .scheme("internal").build();
        final List<DatabaseDto> unmarkedDatabaseDtos = elasticSearchUtil
            .getQualifiedNamesByMarkerByNames("database", context.getQNames(), context.getStartInstant(),
                context.getExcludeQNames(),
                DatabaseDto.class);
        if (!unmarkedDatabaseDtos.isEmpty()) {
            if (unmarkedDatabaseDtos.size() <= config.getElasticSearchThresholdUnmarkedDatabasesDelete()) {
                log.info("Traversal Done: Start: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
                final List<String> unmarkedDatabaseNames = Lists.newArrayList();
                // Keep only databases that truly no longer exist at the source; existence
                // check failures are ignored (the database is then NOT deleted).
                final List<DatabaseDto> deleteDatabaseDtos = unmarkedDatabaseDtos.stream().filter(databaseDto -> {
                    boolean result = false;
                    try {
                        unmarkedDatabaseNames.add(databaseDto.getName().toString());
                        result = !databaseService.exists(databaseDto.getName());
                    } catch (Exception e) {
                        log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
                            databaseDto.getName(), e.getMessage());
                    }
                    return result;
                }).collect(Collectors.toList());
                log.info("Unmarked databases({}): {}", unmarkedDatabaseNames.size(), unmarkedDatabaseNames);
                log.info("Deleting databases({})", deleteDatabaseDtos.size());
                if (!deleteDatabaseDtos.isEmpty()) {
                    final List<QualifiedName> deleteDatabaseQualifiedNames = deleteDatabaseDtos.stream()
                        .map(DatabaseDto::getName)
                        .collect(Collectors.toList());
                    final List<String> deleteDatabaseNames = deleteDatabaseQualifiedNames.stream().map(
                        QualifiedName::toString).collect(Collectors.toList());
                    log.info("Deleting databases({}): {}", deleteDatabaseNames.size(), deleteDatabaseNames);
                    // Remove definition metadata first, then soft delete the index entries.
                    userMetadataService.deleteDefinitionMetadata(deleteDatabaseQualifiedNames);
                    elasticSearchUtil.softDelete("database", deleteDatabaseNames, requestContext);
                }
                log.info("End: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
            } else {
                // Too many unmarked databases: likely a traversal problem, so skip deletion.
                log.info("Count of unmarked databases({}) is more than the threshold {}", unmarkedDatabaseDtos.size(),
                    config.getElasticSearchThresholdUnmarkedDatabasesDelete());
                registry.counter(
                    registry.createId(Metrics.CounterElasticSearchUnmarkedDatabaseThreshholdReached.getMetricName()))
                    .increment();
            }
        }
        final List<TableDto> unmarkedTableDtos = elasticSearchUtil
            .getQualifiedNamesByMarkerByNames("table",
                context.getQNames(), context.getStartInstant(), context.getExcludeQNames(), TableDto.class);
        if (!unmarkedTableDtos.isEmpty()) {
            if (unmarkedTableDtos.size() <= config.getElasticSearchThresholdUnmarkedTablesDelete()) {
                log.info("Start: Delete unmarked tables({})", unmarkedTableDtos.size());
                final List<String> unmarkedTableNames = Lists.newArrayList();
                // Keep only tables that truly no longer exist at the source; existence check
                // failures are ignored (the table is then NOT deleted).
                final List<TableDto> deleteTableDtos = unmarkedTableDtos.stream().filter(tableDto -> {
                    boolean result = false;
                    try {
                        unmarkedTableNames.add(tableDto.getName().toString());
                        result = !tableService.exists(tableDto.getName());
                    } catch (Exception e) {
                        log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
                            tableDto.getName(), e.getMessage());
                    }
                    return result;
                }).collect(Collectors.toList());
                log.info("Unmarked tables({}): {}", unmarkedTableNames.size(), unmarkedTableNames);
                log.info("Deleting tables({})", deleteTableDtos.size());
                if (!deleteTableDtos.isEmpty()) {
                    final List<String> deleteTableNames = deleteTableDtos.stream().map(
                        dto -> dto.getName().toString()).collect(Collectors.toList());
                    log.info("Deleting tables({}): {}", deleteTableNames.size(), deleteTableNames);
                    userMetadataService.deleteMetadata("admin", Lists.newArrayList(deleteTableDtos));
                    // Publish event. Elasticsearch event handler will take care of updating the index already
                    // TODO: Re-evaluate events vs. direct calls for these types of situations like in Genie
                    deleteTableDtos.forEach(
                        tableDto -> {
                            tagService.delete(tableDto.getName(), false);
                            this.eventBus.post(
                                new MetacatDeleteTablePostEvent(tableDto.getName(), requestContext, this, tableDto)
                            );
                        }
                    );
                }
                log.info("Traversal Done: End: Delete unmarked tables({})", unmarkedTableDtos.size());
            } else {
                // Too many unmarked tables: likely a traversal problem, so skip deletion.
                log.info("Count of unmarked tables({}) is more than the threshold {}", unmarkedTableDtos.size(),
                    config.getElasticSearchThresholdUnmarkedTablesDelete());
                registry.counter(
                    registry.createId(Metrics.CounterElasticSearchUnmarkedTableThreshholdReached.getMetricName()))
                    .increment();
            }
        }
        log.info("End: Delete unmarked entities");
    }
    /**
     * Save all databases to index it in elastic search.
     *
     * @param context traversal context
     * @param dtos    database dtos
     */
    @Override
    public void applyDatabases(final CatalogTraversal.Context context, final List<DatabaseDto> dtos) {
        // The run id is stored as the refresh marker so unmarked docs can be found later.
        final List<ElasticSearchDoc> docs = dtos.stream()
            .filter(Objects::nonNull)
            .map(dto -> new ElasticSearchDoc(dto.getName().toString(), dto, "admin", false, context.getRunId()))
            .collect(Collectors.toList());
        elasticSearchUtil.save(ElasticSearchDoc.Type.database.name(), docs);
    }
    /**
     * Save all tables to index it in elastic search.
     *
     * @param context traversal context
     * @param dtos    table dtos
     */
    @Override
    public void applyTables(final CatalogTraversal.Context context, final List<Optional<TableDto>> dtos) {
        // Attribute the doc to the table creator when audit info is present, else "admin".
        final List<ElasticSearchDoc> docs = dtos.stream().filter(dto -> dto != null && dto.isPresent()).map(
            tableDtoOptional -> {
                final TableDto dto = tableDtoOptional.get();
                final String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() : "admin";
                return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, context.getRunId());
            }).collect(Collectors.toList());
        elasticSearchUtil.save(ElasticSearchDoc.Type.table.name(), docs);
    }
}
| 2,153 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchRefresh.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.google.common.base.Functions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.CatalogMappingDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.HasMetadata;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetDatabaseServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.joda.time.Instant;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* This class does a refresh of all the metadata entities from original data sources to elastic search.
*
* @author amajumdar
*/
@Slf4j
@Deprecated
public class ElasticSearchRefresh {
private static final Predicate<Object> NOT_NULL = Objects::nonNull;
private static AtomicBoolean isElasticSearchMetacatRefreshAlreadyRunning = new AtomicBoolean(false);
private final CatalogService catalogService;
private final Config config;
private final DatabaseService databaseService;
private final TableService tableService;
private final PartitionService partitionService;
private final ElasticSearchUtil elasticSearchUtil;
private final UserMetadataService userMetadataService;
private final TagService tagService;
private final MetacatEventBus eventBus;
private Instant refreshMarker;
private String refreshMarkerText;
private Registry registry;
// Fixed thread pool
private ListeningExecutorService service;
private ListeningExecutorService esService;
private ExecutorService defaultService;
/**
 * Constructor.
 *
 * @param config              System config
 * @param eventBus            Event bus
 * @param catalogService      Catalog service
 * @param databaseService     Database service
 * @param tableService        Table service
 * @param partitionService    Partition service
 * @param userMetadataService User metadata service
 * @param tagService          Tag service
 * @param registry            registry of spectator
 * @param elasticSearchUtil   ElasticSearch client wrapper
 */
// All arguments are null-checked by Lombok's @NonNull (throws NullPointerException).
public ElasticSearchRefresh(
    @Nonnull @NonNull final Config config,
    @Nonnull @NonNull final MetacatEventBus eventBus,
    @Nonnull @NonNull final CatalogService catalogService,
    @Nonnull @NonNull final DatabaseService databaseService,
    @Nonnull @NonNull final TableService tableService,
    @Nonnull @NonNull final PartitionService partitionService,
    @Nonnull @NonNull final UserMetadataService userMetadataService,
    @Nonnull @NonNull final TagService tagService,
    @Nonnull @NonNull final ElasticSearchUtil elasticSearchUtil,
    @Nonnull @NonNull final Registry registry
) {
    this.config = config;
    this.eventBus = eventBus;
    this.catalogService = catalogService;
    this.databaseService = databaseService;
    this.tableService = tableService;
    this.partitionService = partitionService;
    this.userMetadataService = userMetadataService;
    this.tagService = tagService;
    this.elasticSearchUtil = elasticSearchUtil;
    this.registry = registry;
}
private static ExecutorService newFixedThreadPool(
final int nThreads,
final String threadFactoryName,
final int queueSize
) {
return new ThreadPoolExecutor(nThreads, nThreads,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(queueSize),
new ThreadFactoryBuilder()
.setNameFormat(threadFactoryName)
.build(),
(r, executor) -> {
// this will block if the queue is full
try {
executor.getQueue().put(r);
} catch (InterruptedException e) {
throw Throwables.propagate(e);
}
});
}
/**
* Does a sweep across all catalogs to refresh the same data in elastic search.
*/
public void process() {
final List<String> catalogNames = getCatalogNamesToRefresh();
final List<QualifiedName> qNames = catalogNames.stream()
.map(QualifiedName::ofCatalog).collect(Collectors.toList());
_process(qNames, () -> _processCatalogs(catalogNames), "process", true, 1000);
}
/**
* Does a sweep across given catalogs to refresh the same data in elastic search.
*
* @param catalogNames catalog anmes
*/
public void processCatalogs(final List<String> catalogNames) {
final List<QualifiedName> qNames = catalogNames.stream()
.map(QualifiedName::ofCatalog).collect(Collectors.toList());
_process(qNames, () -> _processCatalogs(catalogNames), "processCatalogs", true, 1000);
}
/**
* Does a sweep across given catalog and databases to refresh the same data in elastic search.
*
* @param catalogName catalog
* @param databaseNames database names
*/
public void processDatabases(final String catalogName, final List<String> databaseNames) {
final List<QualifiedName> qNames = databaseNames.stream()
.map(s -> QualifiedName.ofDatabase(catalogName, s)).collect(Collectors.toList());
_process(qNames, () -> _processDatabases(QualifiedName.ofCatalog(catalogName), qNames), "processDatabases",
true, 1000);
}
/**
* Does a sweep across all catalogs to refresh the same data in elastic search.
*
* @param names qualified names
*/
public void processPartitions(final List<QualifiedName> names) {
List<QualifiedName> qNames = names;
if (qNames == null || qNames.isEmpty()) {
final List<String> catalogNames = Splitter.on(',').omitEmptyStrings().trimResults()
.splitToList(config.getElasticSearchRefreshPartitionsIncludeCatalogs());
qNames = catalogNames.stream()
.map(QualifiedName::ofCatalog).collect(Collectors.toList());
}
final List<QualifiedName> qualifiedNames = qNames;
_process(qualifiedNames, () -> _processPartitions(qualifiedNames), "processPartitions", false, 500);
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
final List<String> tables =
elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(),
qNames, excludeQualifiedNames);
final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream().map(s -> service.submit(() -> {
final QualifiedName tableName = QualifiedName.fromString(s, false);
final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
int offset = 0;
int count;
final Sort sort;
if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
sort = new Sort("id", SortOrder.ASC);
} else {
sort = new Sort("part_id", SortOrder.ASC);
}
final Pageable pageable = new Pageable(10000, offset);
do {
final List<PartitionDto> partitionDtos =
partitionService.list(tableName, sort, pageable, true, true,
new GetPartitionsRequestDto(null, null, true, true));
count = partitionDtos.size();
if (!partitionDtos.isEmpty()) {
final List<List<PartitionDto>> partitionedPartitionDtos = Lists.partition(partitionDtos, 1000);
partitionedPartitionDtos.forEach(
subPartitionsDtos -> indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
offset = offset + count;
pageable.setOffset(offset);
}
} while (count == 10000);
return Futures.transform(Futures.successfulAsList(indexFutures),
Functions.constant((Void) null), defaultService);
})).collect(Collectors.toList());
final ListenableFuture<Void> processPartitionsFuture = Futures.transformAsync(Futures.successfulAsList(futures),
input -> {
final List<ListenableFuture<Void>> inputFuturesWithoutNulls = input.stream().filter(NOT_NULL)
.collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls),
Functions.constant(null), defaultService);
}, defaultService);
return Futures.transformAsync(processPartitionsFuture, input -> {
elasticSearchUtil.refresh();
final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
.map(s -> service
.submit(() -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
.collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(cleanUpFutures),
Functions.constant(null), defaultService);
}, defaultService);
}
private Void partitionsCleanUp(final QualifiedName tableName, final List<QualifiedName> excludeQualifiedNames) {
final List<PartitionDto> unmarkedPartitionDtos = elasticSearchUtil.getQualifiedNamesByMarkerByNames(
ElasticSearchDoc.Type.partition.name(),
Lists.newArrayList(tableName), refreshMarker, excludeQualifiedNames, PartitionDto.class);
if (!unmarkedPartitionDtos.isEmpty()) {
log.info("Start deleting unmarked partitions({}) for table {}",
unmarkedPartitionDtos.size(), tableName);
try {
final List<String> unmarkedPartitionNames = unmarkedPartitionDtos.stream()
.map(p -> p.getDefinitionName().getPartitionName()).collect(Collectors.toList());
final Set<String> existingUnmarkedPartitionNames = Sets.newHashSet(
partitionService.getPartitionKeys(tableName, null, null,
new GetPartitionsRequestDto(null, unmarkedPartitionNames, false, true)));
final List<String> partitionIds = unmarkedPartitionDtos.stream()
.filter(p -> !existingUnmarkedPartitionNames.contains(
p.getDefinitionName().getPartitionName()))
.map(p -> p.getDefinitionName().toString()).collect(Collectors.toList());
if (!partitionIds.isEmpty()) {
log.info("Deleting unused partitions({}) for table {}:{}",
partitionIds.size(), tableName, partitionIds);
elasticSearchUtil.delete(ElasticSearchDoc.Type.partition.name(), partitionIds);
final List<HasMetadata> deletePartitionDtos = unmarkedPartitionDtos.stream()
.filter(
p -> !existingUnmarkedPartitionNames.contains(
p.getDefinitionName().getPartitionName()))
.collect(Collectors.toList());
userMetadataService.deleteMetadata("admin", deletePartitionDtos);
}
} catch (Exception e) {
log.warn("Failed deleting the unmarked partitions for table {}", tableName);
}
log.info("End deleting unmarked partitions for table {}", tableName);
}
return null;
}
@SuppressWarnings("checkstyle:methodname")
private void _process(final List<QualifiedName> qNames, final Supplier<ListenableFuture<Void>> supplier,
final String requestName, final boolean delete, final int queueSize) {
if (isElasticSearchMetacatRefreshAlreadyRunning.compareAndSet(false, true)) {
final long start = registry.clock().wallTime();
try {
log.info("Start: Full refresh of metacat index in elastic search. Processing {} ...", qNames);
final MetacatRequestContext context = MetacatRequestContext.builder()
.userName("admin")
.clientAppName("elasticSearchRefresher")
.apiUri("esRefresh")
.scheme("internal")
.build();
MetacatContextManager.setContext(context);
refreshMarker = Instant.now();
refreshMarkerText = refreshMarker.toString();
service = MoreExecutors
.listeningDecorator(newFixedThreadPool(10, "elasticsearch-refresher-%d", queueSize));
esService = MoreExecutors
.listeningDecorator(newFixedThreadPool(5, "elasticsearch-refresher-es-%d", queueSize));
defaultService = Executors.newSingleThreadExecutor();
supplier.get().get(24, TimeUnit.HOURS);
log.info("End: Full refresh of metacat index in elastic search");
if (delete) {
deleteUnmarkedEntities(qNames, config.getElasticSearchRefreshExcludeQualifiedNames());
}
} catch (Exception e) {
log.error("Full refresh of metacat index failed", e);
registry.counter(registry.createId(Metrics.CounterElasticSearchRefresh.getMetricName())
.withTags(Metrics.tagStatusFailureMap)).increment();
} finally {
try {
shutdown(service);
shutdown(esService);
shutdown(defaultService);
} finally {
isElasticSearchMetacatRefreshAlreadyRunning.set(false);
final long duration = registry.clock().wallTime() - start;
this.registry.timer(Metrics.TimerElasticSearchRefresh.getMetricName()
+ "." + requestName).record(duration, TimeUnit.MILLISECONDS);
log.info("### Time taken to complete {} is {} ms", requestName, duration);
}
}
} else {
log.info("Full refresh of metacat index is already running.");
registry.counter(registry.createId(Metrics.CounterElasticSearchRefreshAlreadyRunning.getMetricName()))
.increment();
}
}
private void shutdown(@Nullable final ExecutorService executorService) {
if (executorService != null) {
executorService.shutdown();
try {
// Wait a while for existing tasks to terminate
if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
executorService.shutdownNow(); // Cancel currently executing tasks
// Wait a while for tasks to respond to being cancelled
if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
log.warn("Thread pool for metacat refresh did not terminate");
}
}
} catch (InterruptedException ie) {
// (Re-)Cancel if current thread also interrupted
executorService.shutdownNow();
// Preserve interrupt status
Thread.currentThread().interrupt();
}
}
}
private void deleteUnmarkedEntities(final List<QualifiedName> qNames,
final List<QualifiedName> excludeQualifiedNames) {
log.info("Start: Delete unmarked entities");
//
// get unmarked qualified names
// check if it not exists
// delete
//
elasticSearchUtil.refresh();
final MetacatRequestContext context = MetacatRequestContext.builder().userName("admin").
clientAppName("metacat-refresh")
.apiUri("esRefresh")
.scheme("internal").build();
final List<DatabaseDto> unmarkedDatabaseDtos = elasticSearchUtil
.getQualifiedNamesByMarkerByNames("database", qNames, refreshMarker, excludeQualifiedNames,
DatabaseDto.class);
if (!unmarkedDatabaseDtos.isEmpty()) {
if (unmarkedDatabaseDtos.size() <= config.getElasticSearchThresholdUnmarkedDatabasesDelete()) {
log.info("Start: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
final List<String> unmarkedDatabaseNames = Lists.newArrayList();
final List<DatabaseDto> deleteDatabaseDtos = unmarkedDatabaseDtos.stream().filter(databaseDto -> {
boolean result = false;
try {
unmarkedDatabaseNames.add(databaseDto.getName().toString());
final DatabaseDto dto = databaseService.get(databaseDto.getName(),
GetDatabaseServiceParameters.builder()
.includeUserMetadata(false)
.includeTableNames(false)
.disableOnReadMetadataIntercetor(false)
.build());
if (dto == null) {
result = true;
}
} catch (DatabaseNotFoundException de) {
result = true;
} catch (Exception e) {
log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
databaseDto.getName(), e.getMessage());
}
return result;
}).collect(Collectors.toList());
log.info("Unmarked databases({}): {}", unmarkedDatabaseNames.size(), unmarkedDatabaseNames);
log.info("Deleting databases({})", deleteDatabaseDtos.size());
if (!deleteDatabaseDtos.isEmpty()) {
final List<QualifiedName> deleteDatabaseQualifiedNames = deleteDatabaseDtos.stream()
.map(DatabaseDto::getName)
.collect(Collectors.toList());
final List<String> deleteDatabaseNames = deleteDatabaseQualifiedNames.stream().map(
QualifiedName::toString).collect(Collectors.toList());
log.info("Deleting databases({}): {}", deleteDatabaseNames.size(), deleteDatabaseNames);
userMetadataService.deleteDefinitionMetadata(deleteDatabaseQualifiedNames);
elasticSearchUtil.softDelete("database", deleteDatabaseNames, context);
}
log.info("End: Delete unmarked databases({})", unmarkedDatabaseDtos.size());
} else {
log.info("Count of unmarked databases({}) is more than the threshold {}", unmarkedDatabaseDtos.size(),
config.getElasticSearchThresholdUnmarkedDatabasesDelete());
registry.counter(
registry.createId(Metrics.CounterElasticSearchUnmarkedDatabaseThreshholdReached.getMetricName()))
.increment();
}
}
final List<TableDto> unmarkedTableDtos = elasticSearchUtil
.getQualifiedNamesByMarkerByNames("table",
qNames, refreshMarker, excludeQualifiedNames, TableDto.class);
if (!unmarkedTableDtos.isEmpty()) {
if (unmarkedTableDtos.size() <= config.getElasticSearchThresholdUnmarkedTablesDelete()) {
log.info("Start: Delete unmarked tables({})", unmarkedTableDtos.size());
final List<String> unmarkedTableNames = Lists.newArrayList();
final List<TableDto> deleteTableDtos = unmarkedTableDtos.stream().filter(tableDto -> {
boolean result = false;
try {
unmarkedTableNames.add(tableDto.getName().toString());
final Optional<TableDto> dto = tableService.get(tableDto.getName(),
GetTableServiceParameters.builder()
.includeDataMetadata(false)
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDefinitionMetadata(false)
.build());
if (!dto.isPresent()) {
result = true;
}
} catch (Exception e) {
log.warn("Ignoring exception during deleteUnmarkedEntities for {}. Message: {}",
tableDto.getName(), e.getMessage());
}
return result;
}).collect(Collectors.toList());
log.info("Unmarked tables({}): {}", unmarkedTableNames.size(), unmarkedTableNames);
log.info("Deleting tables({})", deleteTableDtos.size());
if (!deleteTableDtos.isEmpty()) {
final List<String> deleteTableNames = deleteTableDtos.stream().map(
dto -> dto.getName().toString()).collect(Collectors.toList());
log.info("Deleting tables({}): {}", deleteTableNames.size(), deleteTableNames);
userMetadataService.deleteMetadata("admin", Lists.newArrayList(deleteTableDtos));
// Publish event. Elasticsearch event handler will take care of updating the index already
// TODO: Re-evaluate events vs. direct calls for these types of situations like in Genie
deleteTableDtos.forEach(
tableDto -> {
tagService.delete(tableDto.getName(), false);
this.eventBus.post(
new MetacatDeleteTablePostEvent(tableDto.getName(), context, this, tableDto)
);
}
);
}
log.info("End: Delete unmarked tables({})", unmarkedTableDtos.size());
} else {
log.info("Count of unmarked tables({}) is more than the threshold {}", unmarkedTableDtos.size(),
config.getElasticSearchThresholdUnmarkedTablesDelete());
registry.counter(
registry.createId(Metrics.CounterElasticSearchUnmarkedTableThreshholdReached.getMetricName()))
.increment();
}
}
log.info("End: Delete unmarked entities");
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processCatalogs(final List<String> catalogNames) {
log.info("Start: Full refresh of catalogs: {}", catalogNames);
final List<ListenableFuture<CatalogDto>> getCatalogFutures = catalogNames.stream()
.map(catalogName -> service.submit(() -> {
CatalogDto result = null;
try {
result = getCatalog(catalogName);
} catch (Exception e) {
log.error("Failed to retrieve catalog: {}", catalogName);
elasticSearchUtil.log("ElasticSearchRefresh.getCatalog",
ElasticSearchDoc.Type.catalog.name(), catalogName, null,
e.getMessage(), e, true);
}
return result;
}))
.collect(Collectors.toList());
return Futures.transformAsync(Futures.successfulAsList(getCatalogFutures),
input -> {
final List<ListenableFuture<Void>> processCatalogFutures = input.stream().filter(NOT_NULL).map(
catalogDto -> {
final List<QualifiedName> databaseNames = getDatabaseNamesToRefresh(catalogDto);
return _processDatabases(catalogDto.getName(), databaseNames);
}).filter(NOT_NULL).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(processCatalogFutures),
Functions.constant(null), defaultService);
}, defaultService);
}
private List<QualifiedName> getDatabaseNamesToRefresh(final CatalogDto catalogDto) {
List<QualifiedName> result = null;
if (!config.getElasticSearchRefreshIncludeDatabases().isEmpty()) {
result = config.getElasticSearchRefreshIncludeDatabases().stream()
.filter(q -> catalogDto.getName().getCatalogName().equals(q.getCatalogName()))
.collect(Collectors.toList());
} else {
result = catalogDto.getDatabases().stream()
.map(n -> QualifiedName.ofDatabase(catalogDto.getName().getCatalogName(), n))
.collect(Collectors.toList());
}
if (!config.getElasticSearchRefreshExcludeQualifiedNames().isEmpty()) {
result.removeAll(config.getElasticSearchRefreshExcludeQualifiedNames());
}
return result;
}
private List<String> getCatalogNamesToRefresh() {
List<String> result = null;
if (!Strings.isNullOrEmpty(config.getElasticSearchRefreshIncludeCatalogs())) {
result = Splitter.on(',').omitEmptyStrings().trimResults()
.splitToList(config.getElasticSearchRefreshIncludeCatalogs());
} else {
result = getCatalogNames();
}
return result;
}
/**
* Process the list of databases.
*
* @param catalogName catalog name
* @param databaseNames database names
* @return future
*/
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processDatabases(final QualifiedName catalogName,
final List<QualifiedName> databaseNames) {
ListenableFuture<Void> resultFuture = null;
log.info("Full refresh of catalog {} for databases({}): {}", catalogName, databaseNames.size(), databaseNames);
final List<ListenableFuture<DatabaseDto>> getDatabaseFutures = databaseNames.stream()
.map(databaseName -> service.submit(() -> {
DatabaseDto result = null;
try {
result = getDatabase(databaseName);
} catch (Exception e) {
log.error("Failed to retrieve database: {}", databaseName);
elasticSearchUtil.log("ElasticSearchRefresh.getDatabase",
ElasticSearchDoc.Type.database.name(),
databaseName.toString(), null, e.getMessage(), e, true);
}
return result;
}))
.collect(Collectors.toList());
if (getDatabaseFutures != null && !getDatabaseFutures.isEmpty()) {
resultFuture = Futures.transformAsync(Futures.successfulAsList(getDatabaseFutures),
input -> {
final ListenableFuture<Void> processDatabaseFuture = indexDatabaseDtos(catalogName, input);
final List<ListenableFuture<Void>> processDatabaseFutures = input.stream().filter(NOT_NULL)
.map(databaseDto -> {
final List<QualifiedName> tableNames = databaseDto.getTables().stream()
.map(s -> QualifiedName.ofTable(databaseDto.getName().getCatalogName(),
databaseDto.getName().getDatabaseName(), s))
.collect(Collectors.toList());
log.info("Full refresh of database {} for tables({}): {}",
databaseDto.getName(),
databaseDto.getTables().size(), databaseDto.getTables());
return processTables(databaseDto.getName(), tableNames);
}).filter(NOT_NULL).collect(Collectors.toList());
processDatabaseFutures.add(processDatabaseFuture);
return Futures.transform(Futures.successfulAsList(processDatabaseFutures),
Functions.constant(null), defaultService);
}, defaultService);
}
return resultFuture;
}
/**
* Save all databases to index it in elastic search.
*
* @param catalogName catalog name
* @param dtos database dtos
* @return future
*/
private ListenableFuture<Void> indexDatabaseDtos(final QualifiedName catalogName, final List<DatabaseDto> dtos) {
return esService.submit(() -> {
final List<ElasticSearchDoc> docs = dtos.stream()
.filter(dto -> dto != null)
.map(dto -> new ElasticSearchDoc(dto.getName().toString(), dto, "admin", false, refreshMarkerText))
.collect(Collectors.toList());
log.info("Saving databases for catalog: {}", catalogName);
elasticSearchUtil.save(ElasticSearchDoc.Type.database.name(), docs);
return null;
});
}
/**
* Process the list of tables in batches.
*
* @param databaseName database name
* @param tableNames table names
* @return A future containing the tasks
*/
private ListenableFuture<Void> processTables(final QualifiedName databaseName,
final List<QualifiedName> tableNames) {
final List<List<QualifiedName>> tableNamesBatches = Lists.partition(tableNames, 500);
final List<ListenableFuture<Void>> processTablesBatchFutures = tableNamesBatches.stream().map(
subTableNames -> _processTables(databaseName, subTableNames)).collect(Collectors.toList());
return Futures.transform(Futures.successfulAsList(processTablesBatchFutures),
Functions.constant(null), defaultService);
}
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processTables(final QualifiedName databaseName,
final List<QualifiedName> tableNames) {
final List<ListenableFuture<Optional<TableDto>>> getTableFutures = tableNames.stream()
.map(tableName -> service.submit(() -> {
Optional<TableDto> result = null;
try {
result = getTable(tableName);
} catch (Exception e) {
log.error("Failed to retrieve table: {}", tableName);
elasticSearchUtil.log("ElasticSearchRefresh.getTable",
ElasticSearchDoc.Type.table.name(),
tableName.toString(), null, e.getMessage(), e, true);
}
return result;
}))
.collect(Collectors.toList());
return Futures.transformAsync(Futures.successfulAsList(getTableFutures),
input -> indexTableDtos(databaseName, input), defaultService);
}
/**
* Save all tables to index it in elastic search.
*
* @param databaseName database name
* @param dtos table dtos
* @return future
*/
private ListenableFuture<Void> indexTableDtos(final QualifiedName databaseName,
final List<Optional<TableDto>> dtos) {
return esService.submit(() -> {
final List<ElasticSearchDoc> docs = dtos.stream().filter(dto -> dto != null && dto.isPresent()).map(
tableDtoOptional -> {
final TableDto dto = tableDtoOptional.get();
final String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() : "admin";
return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, refreshMarkerText);
}).collect(Collectors.toList());
log.info("Saving tables for database: {}", databaseName);
elasticSearchUtil.save(ElasticSearchDoc.Type.table.name(), docs);
return null;
});
}
/**
* Save all tables to index it in elastic search.
*
* @param tableName database name
* @param dtos partition dtos
* @return future
*/
private ListenableFuture<Void> indexPartitionDtos(final QualifiedName tableName, final List<PartitionDto> dtos) {
return esService.submit(() -> {
final List<ElasticSearchDoc> docs = dtos.stream().filter(dto -> dto != null).map(
dto -> {
final String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() : "admin";
return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, refreshMarkerText);
}).collect(Collectors.toList());
log.info("Saving partitions for tableName: {}", tableName);
elasticSearchUtil.save(ElasticSearchDoc.Type.partition.name(), docs);
return null;
});
}
protected List<String> getCatalogNames() {
return catalogService.getCatalogNames().stream().map(CatalogMappingDto::getCatalogName).collect(
Collectors.toList());
}
protected CatalogDto getCatalog(final String catalogName) {
return catalogService.get(QualifiedName.ofCatalog(catalogName));
}
protected DatabaseDto getDatabase(final QualifiedName databaseName) {
return databaseService.get(databaseName,
GetDatabaseServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeTableNames(true)
.includeUserMetadata(true)
.build());
}
protected Optional<TableDto> getTable(final QualifiedName tableName) {
return tableService.get(tableName, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build());
}
}
| 2,154 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDocConstants.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
/**
* ElasticSearchDocConstants.
*
* @author zhenl
*/
/**
 * Field-name constants for the definition-metadata sections of an elastic search document.
 *
 * <p>Non-instantiable holder class; all members are package-private compile-time constants
 * naming JSON keys under the {@code definitionMetadata} object.</p>
 *
 * @author zhenl
 */
final class ElasticSearchDocConstants {
    /** Top-level JSON key holding a table's definition metadata. */
    static final String DEFINITION_METADATA = "definitionMetadata";
    /** Key for the owner section of the definition metadata. */
    static final String DEFINITION_METADATA_OWNER = "owner";
    /** Key for the list of tags applied to the entity. */
    static final String DEFINITION_METADATA_TAGS = "tags";
    /** Key for the data-hygiene section. */
    static final String DEFINITION_METADATA_DATA_HYGIENE = "data_hygiene";
    /** Key for the lifetime (retention) section. */
    static final String DEFINITION_METADATA_LIFETIME = "lifetime";
    /** Key for the extended-schema section. */
    static final String DEFINITION_METADATA_EXTENDED_SCHEMA = "extendedSchema";
    /** Key for the data-dependency section. */
    static final String DEFINITION_METADATA_DATA_DEPENDENCY = "data_dependency";
    /** Key for the table-cost section. */
    static final String DEFINITION_METADATA_TABLE_COST = "table_cost";
    /** Key for the lifecycle section. */
    static final String DEFINITION_METADATA_LIFECYCLE = "lifecycle";
    /** Key for the audience section. */
    static final String DEFINITION_METADATA_AUDIENCE = "audience";
    /** Key for the model section. */
    static final String DEFINITION_METADATA_MODEL = "model";
    //TODO: remove after the data are fixed and copied to subjectAreas
    /** Legacy singular subject-area key (superseded by {@link #DEFINITION_METADATA_SUBJECT_AREAS}). */
    static final String DEFINITION_METADATA_SUBJECT_AREA = "subject_area";
    /** Key for the subject-areas section. */
    static final String DEFINITION_METADATA_SUBJECT_AREAS = "subjectAreas";
    /** Key for the data-category section. */
    static final String DEFINITION_METADATA_DATA_CATEGORY = "data_category";
    /** Key for the job section. */
    static final String DEFINITION_METADATA_JOB = "job";
    /** Key for the free-form table description. */
    static final String DEFINITION_METADATA_TABLE_DESCRIPTION = "table_description";
    /** Key for the data-management section. */
    static final String DEFINITION_DATA_MANAGEMENT = "data_management";

    /** Prevents instantiation of this constants holder. */
    private ElasticSearchDocConstants() {
    }
}
| 2,155 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package includes elastic search integration classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.search;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,156 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDoc.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.search;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.dto.CatalogDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import lombok.Getter;
import java.time.Instant;
/**
* Document that gets stored in elastic search.
*
* @author amajumdar
*/
@Getter
public class ElasticSearchDoc {
/**
* Definition Metadata pull out fields.
*/
private static final String[] DEFINITION_METADATA_FIELDS = {
ElasticSearchDocConstants.DEFINITION_METADATA_OWNER,
ElasticSearchDocConstants.DEFINITION_METADATA_TAGS,
ElasticSearchDocConstants.DEFINITION_METADATA_DATA_HYGIENE,
ElasticSearchDocConstants.DEFINITION_METADATA_LIFETIME,
ElasticSearchDocConstants.DEFINITION_METADATA_EXTENDED_SCHEMA,
ElasticSearchDocConstants.DEFINITION_METADATA_DATA_DEPENDENCY,
ElasticSearchDocConstants.DEFINITION_METADATA_TABLE_COST,
ElasticSearchDocConstants.DEFINITION_METADATA_LIFECYCLE,
ElasticSearchDocConstants.DEFINITION_METADATA_AUDIENCE,
ElasticSearchDocConstants.DEFINITION_METADATA_MODEL,
ElasticSearchDocConstants.DEFINITION_METADATA_SUBJECT_AREA, //TODO: remove after the data is moved
ElasticSearchDocConstants.DEFINITION_METADATA_SUBJECT_AREAS,
ElasticSearchDocConstants.DEFINITION_METADATA_DATA_CATEGORY,
ElasticSearchDocConstants.DEFINITION_METADATA_JOB,
ElasticSearchDocConstants.DEFINITION_METADATA_TABLE_DESCRIPTION,
ElasticSearchDocConstants.DEFINITION_DATA_MANAGEMENT,
};
private String id;
private Object dto;
private Long timestamp;
private String user;
private boolean deleted;
private String refreshMarker;
/**
* Constructor.
*
* @param id doc id
* @param dto dto
* @param user user name
* @param deleted is it marked deleted
*/
public ElasticSearchDoc(final String id,
final Object dto,
final String user,
final boolean deleted) {
this.id = id;
this.dto = dto;
this.user = user;
this.deleted = deleted;
this.timestamp = Instant.now().toEpochMilli();
}
/**
* Constructor.
*
* @param id doc id
* @param dto dto
* @param user user name
* @param deleted is it marked deleted
* @param timestamp timestampe of the doc
*/
public ElasticSearchDoc(final String id,
final Object dto,
final String user,
final boolean deleted,
final long timestamp) {
this.id = id;
this.dto = dto;
this.user = user;
this.deleted = deleted;
this.timestamp = timestamp;
}
/**
* Constructor.
*
* @param id doc id
* @param dto dto
* @param user user name
* @param deleted is it marked deleted
* @param refreshMarker refresh marker
*/
public ElasticSearchDoc(final String id,
final Object dto,
final String user,
final boolean deleted,
final String refreshMarker) {
this.id = id;
this.dto = dto;
this.user = user;
this.deleted = deleted;
this.refreshMarker = refreshMarker;
this.timestamp = Instant.now().toEpochMilli();
}
/**
* addSearchableDefinitionMetadataEnabled.
*
* @param objectNode object node
*/
public void addSearchableDefinitionMetadata(final ObjectNode objectNode) {
final JsonNode jsonNode = objectNode.get(ElasticSearchDocConstants.DEFINITION_METADATA);
final ObjectNode node = JsonNodeFactory.instance.objectNode();
for (final String tag : DEFINITION_METADATA_FIELDS) {
node.set(tag, jsonNode.get(tag));
}
objectNode.set(Field.SEARCHABLE_DEFINITION_METADATA, node);
}
    /**
     * Document types stored in the ElasticSearch index, each mapped to the DTO
     * class used to (de)serialize its payload.
     */
    public enum Type {
        /**
         * Catalog, database and table documents.
         */
        catalog(CatalogDto.class), database(DatabaseDto.class), table(TableDto.class),
        /**
         * Materialized-view and partition documents.
         */
        mview(TableDto.class), partition(PartitionDto.class);

        // DTO class backing this document type.
        // NOTE(review): raw Class type; Class<?> would be cleaner but changing the
        // getter's return type could break callers — confirm before tightening.
        private Class clazz;

        Type(final Class clazz) {
            this.clazz = clazz;
        }

        /**
         * Returns the DTO class backing this document type.
         */
        public Class getClazz() {
            return clazz;
        }
    }
    /**
     * Document context attribute names. Trailing underscores presumably avoid
     * clashes with same-named fields inside the serialized DTO JSON — TODO confirm.
     */
    protected static class Field {
        /** Name of the user that wrote the document. */
        public static final String USER = "user_";
        /** Soft-delete flag for the document. */
        public static final String DELETED = "deleted_";
        /** Marker correlating documents touched by a refresh run. */
        public static final String REFRESH_MARKER = "refreshMarker_";
        /** Node holding the searchable subset of definition metadata. */
        public static final String SEARCHABLE_DEFINITION_METADATA = "searchableDefinitionMetadata";
        /** Document timestamp in epoch milliseconds. */
        public static final String TIMESTAMP = "timestamp";
        /** Node holding the data metadata payload. */
        public static final String DATA_METADATA = "dataMetadata";
    }
}
| 2,157 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/NotificationService.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
/**
* Interface for services which will provide external notifications based on internal events. The structure and
* destinations of the notifications are left up to the implementation.
*
* @author tgianos
* @since 0.1.47
*/
public interface NotificationService {
    /**
     * Publish information about partitions being added.
     *
     * @param event The event passed within the JVM after a partition has been successfully added
     */
    void notifyOfPartitionAddition(MetacatSaveTablePartitionPostEvent event);

    /**
     * Publish information about a partition metadata-only save.
     *
     * <p>NOTE(review): the method name misspells "Metadata"; renaming it would
     * break all implementers and callers, so the name is kept as-is.</p>
     *
     * @param event The event passed within the JVM after partition metadata has been successfully saved
     */
    void notifyOfPartitionMetdataDataSaveOnly(MetacatSaveTablePartitionMetadataOnlyPostEvent event);

    /**
     * Publish information about partitions being deleted.
     *
     * @param event The event passed within the JVM after a partition has been successfully deleted
     */
    void notifyOfPartitionDeletion(MetacatDeleteTablePartitionPostEvent event);

    /**
     * Publish information about a table being created.
     *
     * @param event The event passed within the JVM after a table has been successfully created
     */
    void notifyOfTableCreation(MetacatCreateTablePostEvent event);

    /**
     * Publish information about a table being deleted.
     *
     * @param event The event passed within the JVM after a table has been successfully deleted
     */
    void notifyOfTableDeletion(MetacatDeleteTablePostEvent event);

    /**
     * Publish information about a table being renamed.
     *
     * @param event The event passed within the JVM after a table has been successfully renamed
     */
    void notifyOfTableRename(MetacatRenameTablePostEvent event);

    /**
     * Publish information about a table being updated.
     *
     * @param event The event passed within the JVM after a table has been successfully updated
     */
    void notifyOfTableUpdate(MetacatUpdateTablePostEvent event);
}
| 2,158 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/DefaultNotificationServiceImpl.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import lombok.extern.slf4j.Slf4j;
/**
* This is a default implementation of the NotificationService interface. It doesn't really do anything other than
* log the event that would have generated some sort of external notification in a real instance. This class exists
* primarily to handle returns from providers when the "plugin" isn't enabled instead of returning null which is
* prohibited by the Provider interface definition.
*
* @author tgianos
* @since 0.1.47
*/
@Slf4j
public class DefaultNotificationServiceImpl implements NotificationService {
    // Every handler intentionally does nothing but log the event at DEBUG level.
    // Parameterized logging ("{}") defers event.toString() until DEBUG is
    // actually enabled, avoiding needless string construction on the hot path.

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfPartitionAddition(final MetacatSaveTablePartitionPostEvent event) {
        log.debug("{}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfPartitionDeletion(final MetacatDeleteTablePartitionPostEvent event) {
        log.debug("{}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfPartitionMetdataDataSaveOnly(final MetacatSaveTablePartitionMetadataOnlyPostEvent event) {
        log.debug("{}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfTableCreation(final MetacatCreateTablePostEvent event) {
        log.debug("{}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfTableDeletion(final MetacatDeleteTablePostEvent event) {
        log.debug("{}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfTableRename(final MetacatRenameTablePostEvent event) {
        log.debug("{}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void notifyOfTableUpdate(final MetacatUpdateTablePostEvent event) {
        log.debug("{}", event);
    }
}
| 2,159 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes related to sending notifications out of Metacat via implementations of the service interface.
*
* @author tgianos
* @since 0.1.46
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.notifications;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,160 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationPartitionAddMsg.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
/**
* Enum class for partition add payload message.
*
* @author zhenl
* @since 1.2.0
*/
public enum SNSNotificationPartitionAddMsg {
    // NOTE(review): several constant names carry misspellings (PARITITION, UNABLED);
    // they are serialized into payloads via name(), so renaming would break consumers.
    /**
     * Attached a valid partition key.
     */
    ATTACHED_VALID_PARITITION_KEY,
    /**
     * Invalid partition key format.
     */
    INVALID_PARTITION_KEY_FORMAT,
    /**
     * All partition keys are in the future.
     */
    ALL_FUTURE_PARTITION_KEYS,
    /**
     * Empty deletion column.
     */
    EMPTY_DELETE_COLUMN,
    /**
     * No candidate partition keys.
     */
    NO_CANDIDATE_PARTITION_KEYS,
    /**
     * Missing metadata info for partition key.
     */
    MISSING_METADATA_INFO_FOR_PARTITION_KEY,
    /**
     * Failure getting the latest partition key.
     */
    FAILURE_OF_GET_LATEST_PARTITION_KEY,
    /**
     * Partition key attachment disabled.
     */
    PARTITION_KEY_UNABLED
}
| 2,161 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationMetric.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* SNS Notification Metric.
*
* @author zhenl
* @since 1.1.0
*/
@Slf4j
@Getter
public class SNSNotificationMetric {
private final Registry registry;
private final HashMap<String, Counter> counterHashMap = new HashMap<>();
/**
* Constructor.
*
* @param registry The registry handle of spectator
*/
public SNSNotificationMetric(
final Registry registry
) {
this.registry = registry;
this.counterHashMap.put(Metrics.CounterSNSNotificationTablePartitionAdd.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTablePartitionAdd.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTablePartitionDelete.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTablePartitionDelete.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationPartitionDelete.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationPartitionDelete.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableCreate.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableCreate.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableDelete.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableDelete.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableRename.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableRename.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationTableUpdate.getMetricName(),
registry.counter(registry.createId(Metrics.CounterSNSNotificationTableUpdate.getMetricName())
.withTags(Metrics.tagStatusSuccessMap)));
this.counterHashMap.put(Metrics.CounterSNSNotificationPublishMessageSizeExceeded.getMetricName(),
registry.counter(
registry.createId(Metrics.CounterSNSNotificationPublishMessageSizeExceeded.getMetricName())));
this.counterHashMap.put(Metrics.CounterSNSNotificationPartitionAdd.getMetricName(),
registry.counter(
registry.createId(Metrics.CounterSNSNotificationPartitionAdd.getMetricName())));
this.counterHashMap.put(Metrics.CounterSNSNotificationPublishFallback.getMetricName(),
registry.counter(
registry.createId(Metrics.CounterSNSNotificationPublishFallback.getMetricName())));
}
void counterIncrement(final String counterKey) {
if (counterHashMap.containsKey(counterKey)) {
this.counterHashMap.get(counterKey).increment();
} else {
log.error("SNS Notification does not suport counter for {}", counterKey);
}
}
void handleException(
final QualifiedName name,
final String message,
final String counterKey,
@Nullable final SNSMessage payload,
final Exception e
) {
log.error("{} with payload: {}", message, payload, e);
final Map<String, String> tags = new HashMap<>(name.parts());
tags.putAll(Metrics.tagStatusFailureMap);
this.registry.counter(this.registry.createId(counterKey).withTags(tags)).increment();
Throwables.propagate(e);
}
void recordTime(final SNSMessage<?> message, final String timeName) {
final Timer timer = this.registry.timer(
timeName,
Metrics.TagEventsType.getMetricName(),
message.getClass().getName()
);
timer.record(this.registry.clock().wallTime() - message.getTimestamp(), TimeUnit.MILLISECONDS);
}
void recordPartitionLatestDeleteColumn(final QualifiedName name,
@Nullable final String latestDeleteColumn,
final String message) {
final Map<String, String> tags = new HashMap<>(name.parts());
if (latestDeleteColumn != null) {
tags.put("latestDeleteColumn", latestDeleteColumn);
}
tags.put("message", message);
this.registry.counter(
this.registry.createId(Metrics.CounterSNSNotificationPartitionLatestDeleteColumnAdd.getMetricName())
.withTags(tags)).increment();
}
}
| 2,162 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationServiceImpl.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
import com.amazonaws.services.sns.AmazonSNS;
import com.amazonaws.services.sns.model.PublishResult;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.fge.jsonpatch.JsonPatch;
import com.github.fge.jsonpatch.diff.JsonDiff;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import com.netflix.metacat.common.dto.notifications.sns.messages.AddPartitionMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.CreateTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.DeletePartitionMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.DeleteTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.RenameTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateOrRenameTableMessageBase;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateTablePartitionsMessage;
import com.netflix.metacat.common.dto.notifications.sns.payloads.TablePartitionsUpdatePayload;
import com.netflix.metacat.common.dto.notifications.sns.payloads.UpdatePayload;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent;
import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.main.configs.SNSNotificationsConfig;
import com.netflix.metacat.main.services.notifications.NotificationService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import javax.annotation.Nullable;
import javax.validation.constraints.Size;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Implementation of the NotificationService using Amazon SNS.
*
* @author tgianos
* @since 0.1.47
*/
@Slf4j
public class SNSNotificationServiceImpl implements NotificationService {
    private final AtomicBoolean isClientPoolDown;
    // Not final: replaced by reinitializeClient() when the HTTP pool shuts down.
    private AmazonSNS client;
    private final String tableTopicArn;
    private final String partitionTopicArn;
    private final ObjectMapper mapper;
    private final Config config;
    private SNSNotificationMetric notificationMetric;
    private SNSNotificationServiceUtil snsNotificationServiceUtil;

    /**
     * Constructor.
     *
     * @param client                     The SNS client to use to publish notifications
     * @param tableTopicArn              The topic to publish table related notifications to
     * @param partitionTopicArn          The topic to publish partition related notifications to
     * @param mapper                     The object mapper to use to convert objects to JSON strings
     * @param config                     The system config
     * @param notificationMetric         The SNS notification metric
     * @param snsNotificationServiceUtil The SNS notification service util
     */
    public SNSNotificationServiceImpl(
        final AmazonSNS client,
        @Size(min = 1) final String tableTopicArn,
        @Size(min = 1) final String partitionTopicArn,
        final ObjectMapper mapper,
        final Config config,
        final SNSNotificationMetric notificationMetric,
        final SNSNotificationServiceUtil snsNotificationServiceUtil
    ) {
        this.client = client;
        this.tableTopicArn = tableTopicArn;
        this.partitionTopicArn = partitionTopicArn;
        this.mapper = mapper;
        this.config = config;
        this.notificationMetric = notificationMetric;
        this.snsNotificationServiceUtil = snsNotificationServiceUtil;
        this.isClientPoolDown = new AtomicBoolean();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfPartitionAddition(final MetacatSaveTablePartitionPostEvent event) {
        log.debug("Received SaveTablePartitionPostEvent {}", event);
        final String name = event.getName().toString();
        final long timestamp = event.getRequestContext().getTimestamp();
        final String requestId = event.getRequestContext().getId();
        // Publish a global message stating how many partitions were updated for the table to the table topic
        final TablePartitionsUpdatePayload partitionsUpdatePayload;
        if (this.config.isSnsNotificationAttachPartitionIdsEnabled() && event.getPartitions() != null) {
            partitionsUpdatePayload = this.snsNotificationServiceUtil.
                createTablePartitionsUpdatePayload(event.getPartitions(), event);
        } else {
            // BUGFIX: the condition above implies getPartitions() can be null, but the
            // previous code dereferenced it unconditionally here, causing an NPE when
            // partition-id attachment is disabled and the event carries no partitions.
            final int numCreatedPartitions =
                event.getPartitions() == null ? 0 : event.getPartitions().size();
            partitionsUpdatePayload = new TablePartitionsUpdatePayload(null, numCreatedPartitions, 0,
                SNSNotificationPartitionAddMsg.PARTITION_KEY_UNABLED.name(),
                event.getPartitions() == null
                    ? null
                    : SNSNotificationServiceUtil.getPartitionNameListFromDtos(event.getPartitions()));
        }
        final UpdateTablePartitionsMessage tableMessage = new UpdateTablePartitionsMessage(
            UUID.randomUUID().toString(),
            timestamp,
            requestId,
            name,
            partitionsUpdatePayload);
        this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
            tableMessage, event.getName(),
            "Unable to publish table partition add notification",
            Metrics.CounterSNSNotificationTablePartitionAdd.getMetricName());
        //publish the delete column key metric after publishing message
        if (this.config.isSnsNotificationAttachPartitionIdsEnabled()) {
            this.notificationMetric.recordPartitionLatestDeleteColumn(
                event.getName(), partitionsUpdatePayload.getLatestDeleteColumnValue(),
                partitionsUpdatePayload.getMessage());
        }
        // Same null-guard as above before iterating the partitions.
        if (config.isSnsNotificationTopicPartitionEnabled() && event.getPartitions() != null) {
            for (final PartitionDto partition : event.getPartitions()) {
                // Loop-scoped: one message per partition.
                final AddPartitionMessage message = new AddPartitionMessage(
                    UUID.randomUUID().toString(),
                    timestamp,
                    requestId,
                    name,
                    partition
                );
                this.publishNotification(this.partitionTopicArn, this.config.getFallbackSnsTopicPartitionArn(),
                    message, event.getName(),
                    "Unable to publish partition creation notification",
                    Metrics.CounterSNSNotificationPartitionAdd.getMetricName());
                log.debug("Published create partition message {} on {}", message, this.partitionTopicArn);
            }
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfPartitionMetdataDataSaveOnly(final MetacatSaveTablePartitionMetadataOnlyPostEvent event) {
        // Intentionally a no-op beyond logging: metadata-only saves are not published.
        log.debug("Received SaveTablePartitionMetadataOnlyPostEvent {}", event);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfPartitionDeletion(final MetacatDeleteTablePartitionPostEvent event) {
        log.debug("Received DeleteTablePartition event {}", event);
        final String name = event.getName().toString();
        final long timestamp = event.getRequestContext().getTimestamp();
        final String requestId = event.getRequestContext().getId();
        final TablePartitionsUpdatePayload partitionsUpdatePayload =
            new TablePartitionsUpdatePayload(null, 0, event.getPartitions().size(),
                SNSNotificationPartitionAddMsg.PARTITION_KEY_UNABLED.name(),
                SNSNotificationServiceUtil.getPartitionNameListFromDtos(event.getPartitions()));
        final UpdateTablePartitionsMessage tableMessage = new UpdateTablePartitionsMessage(
            UUID.randomUUID().toString(),
            timestamp,
            requestId,
            name,
            partitionsUpdatePayload
        );
        this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
            tableMessage, event.getName(),
            "Unable to publish table partition delete notification",
            Metrics.CounterSNSNotificationTablePartitionDelete.getMetricName());
        if (config.isSnsNotificationTopicPartitionEnabled()) {
            for (final String partitionId : event.getPartitionIds()) {
                // Loop-scoped: one message per deleted partition id.
                final DeletePartitionMessage message = new DeletePartitionMessage(
                    UUID.randomUUID().toString(),
                    timestamp,
                    requestId,
                    name,
                    partitionId
                );
                this.publishNotification(this.partitionTopicArn, this.config.getFallbackSnsTopicPartitionArn(),
                    message, event.getName(),
                    "Unable to publish partition deletion notification",
                    Metrics.CounterSNSNotificationPartitionDelete.getMetricName());
                log.debug("Published delete partition message {} on {}", message, this.partitionTopicArn);
            }
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfTableCreation(final MetacatCreateTablePostEvent event) {
        log.debug("Received CreateTableEvent {}", event);
        final CreateTableMessage message = new CreateTableMessage(
            UUID.randomUUID().toString(),
            event.getRequestContext().getTimestamp(),
            event.getRequestContext().getId(),
            event.getName().toString(),
            event.getTable()
        );
        this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
            message, event.getName(),
            "Unable to publish create table notification",
            Metrics.CounterSNSNotificationTableCreate.getMetricName());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfTableDeletion(final MetacatDeleteTablePostEvent event) {
        log.debug("Received DeleteTableEvent {}", event);
        final DeleteTableMessage message = new DeleteTableMessage(
            UUID.randomUUID().toString(),
            event.getRequestContext().getTimestamp(),
            event.getRequestContext().getId(),
            event.getName().toString(),
            event.getTable()
        );
        this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
            message, event.getName(),
            "Unable to publish delete table notification",
            Metrics.CounterSNSNotificationTableDelete.getMetricName());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfTableRename(final MetacatRenameTablePostEvent event) {
        log.debug("Received RenameTableEvent {}", event);
        final RenameTableMessage message = (RenameTableMessage) this.createUpdateorRenameTableMessage(
            UUID.randomUUID().toString(),
            event.getRequestContext().getTimestamp(),
            event.getRequestContext().getId(),
            event.getName(),
            event.getOldTable(),
            event.getCurrentTable(),
            "Unable to create json patch for rename table notification",
            Metrics.CounterSNSNotificationTableRename.getMetricName(),
            SNSMessageType.TABLE_RENAME
        );
        this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
            message, event.getName(),
            "Unable to publish rename table notification",
            Metrics.CounterSNSNotificationTableRename.getMetricName());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    @EventListener
    public void notifyOfTableUpdate(final MetacatUpdateTablePostEvent event) {
        log.debug("Received UpdateTableEvent {}", event);
        final SNSMessage<?> message;
        final long timestamp = event.getRequestContext().getTimestamp();
        final String requestId = event.getRequestContext().getId();
        final QualifiedName name = event.getName();
        final TableDto oldTable = event.getOldTable();
        final TableDto currentTable = event.getCurrentTable();
        if (event.isLatestCurrentTable()) {
            message = this.createUpdateorRenameTableMessage(
                UUID.randomUUID().toString(),
                timestamp,
                requestId,
                name,
                oldTable,
                currentTable,
                "Unable to create json patch for update table notification",
                Metrics.CounterSNSNotificationTableUpdate.getMetricName(),
                SNSMessageType.TABLE_UPDATE
            );
        } else {
            // Send a null payload if we failed to get the latest version
            // of the current table. This will signal users to callback
            //
            message = new SNSMessage<Void>(
                UUID.randomUUID().toString(),
                timestamp,
                requestId,
                SNSMessageType.TABLE_UPDATE,
                name.toString(),
                null);
        }
        this.publishNotification(this.tableTopicArn, this.config.getFallbackSnsTopicTableArn(),
            message, event.getName(),
            "Unable to publish update table notification",
            Metrics.CounterSNSNotificationTableUpdate.getMetricName());
    }

    /**
     * Builds an update or rename message carrying the old table plus a JSON
     * patch describing the change. On failure, delegates to the metric handler
     * which records the failure and rethrows — the trailing {@code return null}
     * is therefore unreachable in practice.
     */
    private UpdateOrRenameTableMessageBase createUpdateorRenameTableMessage(
        final String id,
        final long timestamp,
        final String requestId,
        final QualifiedName name,
        final TableDto oldTable,
        final TableDto currentTable,
        final String exceptionMessage,
        final String metricName,
        final SNSMessageType messageType
    ) {
        try {
            final JsonPatch patch = JsonDiff.asJsonPatch(
                this.mapper.valueToTree(oldTable),
                this.mapper.valueToTree(currentTable)
            );
            if (messageType == SNSMessageType.TABLE_UPDATE) {
                return new UpdateTableMessage(
                    id,
                    timestamp,
                    requestId,
                    name.toString(),
                    new UpdatePayload<>(oldTable, patch)
                );
            } else {
                return new RenameTableMessage(
                    id,
                    timestamp,
                    requestId,
                    name.toString(),
                    new UpdatePayload<>(oldTable, patch)
                );
            }
        } catch (final Exception e) {
            // Records a failure metric and rethrows (Throwables.propagate).
            this.notificationMetric.handleException(
                name,
                exceptionMessage,
                metricName,
                null,
                e
            );
        }
        return null;
    }

    /**
     * Publishes to the primary topic, falling back to the fallback topic (when
     * configured) on any error. Records the pre-publish delay metric first.
     */
    private void publishNotification(
        final String arn,
        @Nullable final String fallbackArn,
        final SNSMessage<?> message,
        final QualifiedName name,
        final String errorMessage,
        final String counterKey
    ) {
        this.notificationMetric.recordTime(message, Metrics.TimerNotificationsBeforePublishDelay.getMetricName());
        try {
            //
            // Publish the event to original SNS topic. If we receive an error from SNS, we will then try publishing
            // to the fallback topic.
            //
            try {
                publishNotification(arn, message, counterKey);
            } catch (final Exception exception) {
                if (fallbackArn != null) {
                    log.info("Fallback published message to topic {} because of error {}",
                        fallbackArn, exception.getMessage());
                    notificationMetric.counterIncrement(
                        Metrics.CounterSNSNotificationPublishFallback.getMetricName());
                    publishNotification(fallbackArn, message, counterKey);
                } else {
                    throw exception;
                }
            }
        } catch (Exception e) {
            notificationMetric.handleException(name, errorMessage, counterKey, message, e);
        }
    }

    /**
     * Serializes and publishes the message; if serialization/publication fails
     * (e.g. payload too large), retries once with a payload-less message so
     * consumers still learn that the entity changed.
     */
    private void publishNotification(
        final String arn,
        final SNSMessage<?> message,
        final String counterKey
    ) throws Exception {
        PublishResult result = null;
        try {
            result = publishNotification(arn, this.mapper.writeValueAsString(message));
        } catch (Exception exception) {
            log.error("SNS Publish message failed.", exception);
            notificationMetric.counterIncrement(
                Metrics.CounterSNSNotificationPublishMessageSizeExceeded.getMetricName());
            final SNSMessage<Void> voidMessage = new SNSMessage<>(message.getId(),
                message.getTimestamp(), message.getRequestId(), message.getType(), message.getName(),
                null);
            result = publishNotification(arn, this.mapper.writeValueAsString(voidMessage));
        }
        log.info("Successfully published message to topic {} with id {}", arn, result.getMessageId());
        log.debug("Successfully published message {} to topic {} with id {}", message, arn, result.getMessageId());
        notificationMetric.counterIncrement(counterKey);
        notificationMetric.recordTime(message, Metrics.TimerNotificationsPublishDelay.getMetricName());
    }

    /**
     * Serializes publication with the client-reinit path: while the pool is
     * being rebuilt, publishes synchronize on this instance so they observe
     * the replacement client.
     */
    private PublishResult publishNotification(final String arn, final String message) {
        if (isClientPoolDown.get()) {
            synchronized (this) {
                return publishNotificationWithNoCheck(arn, message);
            }
        } else {
            return publishNotificationWithNoCheck(arn, message);
        }
    }

    private PublishResult publishNotificationWithNoCheck(final String arn, final String message) {
        try {
            return this.client.publish(arn, message);
        } catch (Exception e) {
            //
            // SNS Http client pool once shutdown cannot be recovered. Hence we are shutting down the SNS client
            // and recreating a new instance.
            //
            return Throwables.getCausalChain(e).stream()
                .filter(ex -> ex instanceof IllegalStateException
                    && ex.getMessage().contains("Connection pool shut down"))
                .findFirst()
                .map(ex -> {
                    if (isClientPoolDown.compareAndSet(false, true)) {
                        reinitializeClient();
                    }
                    // Retry through the synchronized path against the new client.
                    return publishNotification(arn, message);
                }).orElseThrow(() -> Throwables.propagate(e));
        }
    }

    /**
     * Replaces the SNS client after its HTTP connection pool has shut down.
     * Synchronized and re-checked so concurrent failures rebuild it only once.
     */
    private synchronized void reinitializeClient() {
        if (isClientPoolDown.get()) {
            log.warn("SNS HTTP connection pool is down. It will be restarted.");
            try {
                this.client.shutdown();
            } catch (Exception exception) {
                log.warn("Failed shutting down SNS client.", exception);
            }
            this.client = new SNSNotificationsConfig().amazonSNS();
            isClientPoolDown.set(false);
        }
    }
}
| 2,163 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/SNSNotificationServiceUtil.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.services.notifications.sns;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.notifications.sns.payloads.TablePartitionsUpdatePayload;
import com.netflix.metacat.common.server.events.MetacatEvent;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TimeZone;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
 * The util class for SNS Notification service.
 *
 * <p>Computes the payload attached to table-partition update notifications, in particular the
 * latest valid value of a table's configured "delete column" across a batch of partitions.
 *
 * <p>NOTE(review): the two shared {@link SimpleDateFormat} instances are not thread-safe; this
 * class appears to be used from a single notification path — confirm before sharing an instance
 * across threads.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Slf4j
public final class SNSNotificationServiceUtil {
    private static final String PARTITION_COLUMN_DATA_TYPE_PATH = "/data_dependency/partition_column_date_type";
    private static final String DELETION_COLUMN_PATH = "/data_hygiene/delete_column";
    // Partition-column data types whose ISO-basic date values are interpreted in Pacific time.
    private static final Set<String> PST_TIME = new HashSet<String>(Arrays.asList("region", "pacific"));
    //Timestamp in seconds: 1522257960 or 1367992474.293378
    //Timestamp in milliseconds: 1522257960000 or 1367992474000.293378
    //ISO basic date format: 20180101
    private static final Pattern TIMESTAMP_FORMAT = Pattern.compile("^(?<time>\\d{10})(?:\\d{3})?(?:\\.\\d+)?$");
    private static final Pattern ISO_BASIC = Pattern.compile("^\\d{8}$");
    // Above this many partitions the per-partition name list is omitted from the payload.
    private static final int PARTITIONS_UPDATED_LIST_MAX_SIZE = 1000;
    private final UserMetadataService userMetadataService;
    private final DateFormat simpleDateFormatRegional = new SimpleDateFormat("yyyyMMdd");
    private final DateFormat simpleDateFormatUTC = new SimpleDateFormat("yyyyMMdd");
    /**
     * SNS Notification Service Util constructor.
     *
     * @param userMetadataService user metadata service
     */
    public SNSNotificationServiceUtil(
        final UserMetadataService userMetadataService
    ) {
        this.userMetadataService = userMetadataService;
        this.simpleDateFormatRegional.setTimeZone(TimeZone.getTimeZone("America/Los_Angeles"));
        this.simpleDateFormatUTC.setTimeZone(TimeZone.getTimeZone("UTC"));
    }
    /**
     * create table partition add payload.
     * The logic below primarily is for calculating the latest deletion column value in a batch of
     * partitions. The latest delete column value:
     * (1) valid timestamp/date format
     * (2) the latest timestamp from the delete column
     * (3) the timestamp must be less or equal to today ( utc now )
     *
     * @param partitionDtos partition DTOs
     * @param event Metacat event
     * @return TablePartitionsUpdatePayload
     */
    public TablePartitionsUpdatePayload createTablePartitionsUpdatePayload(
        final List<PartitionDto> partitionDtos,
        final MetacatEvent event) {
        final List<String> deleteColumnValues;
        String latestDeleteColumnValue = null;
        String message;
        try {
            final Optional<ObjectNode> objectNode = this.userMetadataService.getDefinitionMetadata(
                QualifiedName.ofTable(event.getName().getCatalogName(), event.getName().getDatabaseName(),
                    event.getName().getTableName()));
            //Mark as missing metadata if any of delete column or partition column data type is missing
            if (objectNode.isPresent()
                && !objectNode.get().at(DELETION_COLUMN_PATH).isMissingNode()
                && !objectNode.get().at(PARTITION_COLUMN_DATA_TYPE_PATH).isMissingNode()) {
                final String deleteColumn = objectNode.get().at(DELETION_COLUMN_PATH).textValue();
                //Mark with message empty delete column and return
                if (StringUtils.isEmpty(deleteColumn)) {
                    return new TablePartitionsUpdatePayload(
                        null,
                        partitionDtos.size(),
                        0,
                        SNSNotificationPartitionAddMsg.EMPTY_DELETE_COLUMN.name(),
                        getPartitionNameListFromDtos(partitionDtos)
                    );
                }
                deleteColumnValues = getSortedDeletionPartitionKeys(partitionDtos, deleteColumn);
                //Calculate the latest partition key from candidates
                if (deleteColumnValues != null && !deleteColumnValues.isEmpty()) {
                    message = SNSNotificationPartitionAddMsg.ALL_FUTURE_PARTITION_KEYS.name();
                    //using utc now as today
                    final long nowSecond = Instant.now().getEpochSecond();
                    final boolean regional = PST_TIME.contains(
                        objectNode.get().at(PARTITION_COLUMN_DATA_TYPE_PATH).textValue());
                    //convert the value to utc then compare; values are visited in descending order,
                    //so the first one at or before "now" is the latest valid key.
                    for (String val : deleteColumnValues) {
                        try {
                            final Long timestamp = getTimeStamp(val, regional);
                            if (timestamp <= nowSecond) {
                                latestDeleteColumnValue = deleteColumn + "=" + val; //the delete column with value
                                message = SNSNotificationPartitionAddMsg.ATTACHED_VALID_PARITITION_KEY.name();
                                break;
                            }
                        } catch (ParseException ex) {
                            message = SNSNotificationPartitionAddMsg.INVALID_PARTITION_KEY_FORMAT.name();
                            log.debug("Failure of getting latest key due to invalid timestamp format {} {}:{}",
                                event.getName().getTableName(), deleteColumn, val);
                            break;
                        }
                    }
                } else {
                    message = SNSNotificationPartitionAddMsg.NO_CANDIDATE_PARTITION_KEYS.name();
                }
            } else {
                message = SNSNotificationPartitionAddMsg.MISSING_METADATA_INFO_FOR_PARTITION_KEY.name();
            }
        } catch (Exception ex) {
            message = SNSNotificationPartitionAddMsg.FAILURE_OF_GET_LATEST_PARTITION_KEY.name();
            // Pass the throwable itself (not its message) so the stack trace is logged.
            log.error("Failure of createTablePartitionsUpdatePayload", ex);
        }
        return new TablePartitionsUpdatePayload(
            latestDeleteColumnValue,
            partitionDtos.size(),
            0,
            message,
            getPartitionNameListFromDtos(partitionDtos)
        );
    }
    /**
     * get descending order deletion column value.
     *
     * @param partitionDtos partition DTOs
     * @param deleteColumn delete column name
     * @return descending order deletion column
     */
    @VisibleForTesting
    private static List<String> getSortedDeletionPartitionKeys(final List<PartitionDto> partitionDtos,
                                                              final String deleteColumn) {
        return partitionDtos.stream()
            .map(x -> PartitionUtil.getPartitionKeyValues(x.getName().toString()).get(deleteColumn))
            .filter(Objects::nonNull)
            .sorted(Comparator.reverseOrder())
            .collect(Collectors.toList());
    }
    /**
     * get partition name list from list of partitionDtos. The returned list is capped at
     * the first PARTITIONS_UPDATED_LIST_MAX_SIZE elements, if there are more than that number of elements
     * in the input then the return is empty which serves as a signal that complete list cannot be included
     *
     * @param partitionDtos partition DTOs
     * @return list of partition ids from the input list
     */
    protected static List<String> getPartitionNameListFromDtos(final List<PartitionDto> partitionDtos) {
        if (partitionDtos.size() > PARTITIONS_UPDATED_LIST_MAX_SIZE) {
            // empty list signals
            return Collections.emptyList();
        }
        return partitionDtos.stream()
            .map(dto -> dto.getName().getPartitionName())
            .collect(Collectors.toList());
    }
    /**
     * convert string to time stamp.
     * Three formats are accepted for now, which are basic standard ISO format and epoch timestamp format.
     *
     * @param timeStr time in string
     * @param regional in pst
     * @return timestamp
     * @throws ParseException parsing error
     */
    public Long getTimeStamp(final String timeStr, final boolean regional) throws ParseException {
        final Matcher m = TIMESTAMP_FORMAT.matcher(timeStr);
        if (m.find()) {
            // Epoch value: milliseconds/fractions are truncated to whole seconds by the regex group.
            return Long.parseLong(m.group("time"));
        }
        if (ISO_BASIC.matcher(timeStr).matches()) {
            if (regional) {
                return this.simpleDateFormatRegional.parse(timeStr).toInstant().getEpochSecond();
            } else {
                return this.simpleDateFormatUTC.parse(timeStr).toInstant().getEpochSecond();
            }
        }
        throw new ParseException("Unknown format", 0);
    }
}
| 2,164 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/services/notifications/sns/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Classes related to setting up and sending SNS Notifications.
*
* @author tgianos
* @since 0.1.47
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.services.notifications.sns;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,165 |
0 |
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat
|
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata functional test classes.
*/
package com.netflix.metacat.metadata;
| 2,166 |
0 |
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata functional test classes.
*/
package com.netflix.metacat.metadata.store;
| 2,167 |
0 |
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store
|
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata test classes.
*/
package com.netflix.metacat.metadata.store.data;
| 2,168 |
0 |
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/repositories/CrdbDataMetadataRepositoryTests.java
|
//CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.transaction.annotation.Transactional;
/**
 * Runs the data-metadata repository test suite against the CockroachDB-profile
 * user-metadata store configuration instead of the default H2 profile.
 */
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-crdb"})
@Transactional
@AutoConfigureDataJpa
public class CrdbDataMetadataRepositoryTests extends DataMetadataRepositoryTests {
}
| 2,169 |
0 |
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/repositories/CrdbDefinitionMetadataRepositoryTests.java
|
//CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.transaction.annotation.Transactional;
/**
 * Runs the definition-metadata repository test suite against the CockroachDB-profile
 * user-metadata store configuration instead of the default H2 profile.
 */
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-crdb"})
@Transactional
@AutoConfigureDataJpa
public class CrdbDefinitionMetadataRepositoryTests extends DefinitionMetadataRepositoryTests {
}
| 2,170 |
0 |
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/functionalTest/java/com/netflix/metacat/metadata/store/data/repositories/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata repository test classes.
*/
package com.netflix.metacat.metadata.store.data.repositories;
| 2,171 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata test classes.
*/
package com.netflix.metacat.metadata;
| 2,172 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/util/EntityTestUtil.java
|
//CHECKSTYLE:OFF
package com.netflix.metacat.metadata.util;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.data.entities.AuditEntity;
import com.netflix.metacat.metadata.store.data.entities.DataMetadataEntity;
import com.netflix.metacat.metadata.store.data.entities.DefinitionMetadataEntity;
import java.time.Instant;
public class EntityTestUtil {
public static ObjectMapper objectMapper = new ObjectMapper()
.findAndRegisterModules()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.setSerializationInclusion(JsonInclude.Include.ALWAYS);
public static DataMetadataEntity createDataMetadataEntity() {
return createDataMetadataEntity("s3://iceberg/bucket");
}
public static DataMetadataEntity createDataMetadataEntity(String uri) {
return DataMetadataEntity.builder()
.uri(uri)
.data(createTestObjectNode())
.audit(createAuditEntity())
.build();
}
public static DefinitionMetadataEntity createDefinitionMetadataEntity() {
return createDefinitionMetadataEntity(QualifiedName.fromString("prodhive/foo/bar"));
}
public static DefinitionMetadataEntity createDefinitionMetadataEntity(QualifiedName name) {
return DefinitionMetadataEntity.builder()
.name(name)
.data(createTestObjectNode())
.audit(createAuditEntity())
.build();
}
public static AuditEntity createAuditEntity() {
return AuditEntity.builder()
.createdBy("metacat_user")
.lastModifiedBy("metacat_user")
.createdDate(Instant.now())
.lastModifiedDate(Instant.now())
.build();
}
public static ObjectNode createTestObjectNode() {
return objectMapper.createObjectNode().put("size", "50");
}
}
| 2,173 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/util/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata utility classes.
*/
package com.netflix.metacat.metadata.util;
| 2,174 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata test classes.
*/
package com.netflix.metacat.metadata.store;
| 2,175 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata test classes.
*/
package com.netflix.metacat.metadata.store.data;
| 2,176 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/repositories/DataMetadataRepositoryTests.java
|
//CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import com.netflix.metacat.metadata.store.data.entities.DataMetadataEntity;
import com.netflix.metacat.metadata.util.EntityTestUtil;
import lombok.extern.slf4j.Slf4j;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.util.Assert;
import javax.transaction.Transactional;
import java.util.Optional;
/**
 * Test data metadata repository APIs: create, fetch by id, soft delete (deleted flag),
 * and hard delete, running against the H2 profile inside a rolled-back transaction.
 */
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-h2db"})
@Transactional
@AutoConfigureDataJpa
@Slf4j
public class DataMetadataRepositoryTests {
    @Autowired
    public DataMetadataRepository dataMetadataRepository;
    @Test
    public void testCreateAndGet() {
        // persist a sample entity and read it back by id
        DataMetadataEntity metadataEntity =
            dataMetadataRepository.save(EntityTestUtil.createDataMetadataEntity());
        // get the entity back
        DataMetadataEntity savedEntity = dataMetadataRepository.getOne(metadataEntity.getId());
        Assert.isTrue(savedEntity.equals(metadataEntity), "Retrieved entity should be the same");
        String testUri = savedEntity.getUri();
        log.info("Found test metadata entity Uri: {} and Id: {}",
            testUri, savedEntity.getId());
        // soft delete the entity
        savedEntity.setDeleted(true);
        dataMetadataRepository.saveAndFlush(savedEntity);
        Optional<DataMetadataEntity> entity =
            dataMetadataRepository.findByUri(testUri);
        Assert.isTrue(entity.isPresent() && entity.get().isDeleted(),
            "Entity should be soft-deleted");
        // delete the entity
        dataMetadataRepository.delete(savedEntity);
        Assert.isTrue(!dataMetadataRepository.findByUri(testUri).isPresent(),
            "Entity should be deleted");
    }
}
| 2,177 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/repositories/DefinitionMetadataRepositoryTests.java
|
//CHECKSTYLE:OFF
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.configs.UserMetadataStoreConfig;
import com.netflix.metacat.metadata.store.data.entities.DefinitionMetadataEntity;
import com.netflix.metacat.metadata.util.EntityTestUtil;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
import java.util.Optional;
/**
 * Test definition metadata repository APIs: create, fetch by id, soft delete (deleted flag),
 * and hard delete, running against the H2 profile inside a rolled-back transaction.
 */
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {UserMetadataStoreConfig.class})
@ActiveProfiles(profiles = {"usermetadata-h2db"})
@Transactional
@AutoConfigureDataJpa
public class DefinitionMetadataRepositoryTests {
    @Autowired
    private DefinitionMetadataRepository definitionMetadataRepository;
    @Test
    public void testCreateAndGet() {
        // persist a sample entity under a known qualified name and read it back by id
        QualifiedName testQName = QualifiedName.fromString("prodhive/foo/bar");
        DefinitionMetadataEntity metadataEntity =
            definitionMetadataRepository.save(EntityTestUtil.createDefinitionMetadataEntity(testQName));
        // get the entity back
        DefinitionMetadataEntity savedEntity = definitionMetadataRepository.getOne(metadataEntity.getId());
        Assert.isTrue(savedEntity.equals(metadataEntity), "Retrieved entity should be the same");
        // soft delete the entity
        savedEntity.setDeleted(true);
        definitionMetadataRepository.saveAndFlush(savedEntity);
        Optional<DefinitionMetadataEntity> entity =
            definitionMetadataRepository.findByName(testQName);
        Assert.isTrue(entity.isPresent() && entity.get().isDeleted(),
            "Entity should be soft-deleted");
        // delete the entity
        definitionMetadataRepository.delete(savedEntity);
        Assert.isTrue(!definitionMetadataRepository.findByName(testQName).isPresent(),
            "Entity should be deleted");
    }
}
| 2,178 |
0 |
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/test/java/com/netflix/metacat/metadata/store/data/repositories/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat metadata repository test classes.
*/
package com.netflix.metacat.metadata.store.data.repositories;
| 2,179 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/UserMetadataServiceImpl.java
|
package com.netflix.metacat.metadata;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.BaseUserMetadataService;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptor;
import com.netflix.metacat.metadata.store.UserMetadataStoreService;
import org.springframework.beans.factory.annotation.Autowired;
/**
 * The Hibernate-based User metadata service implementation.
 *
 * <p>Currently only wires its collaborators; behavior is inherited from
 * {@code BaseUserMetadataService}.
 *
 * @author rveeramacheneni
 */
public class UserMetadataServiceImpl extends BaseUserMetadataService {
    // Store-layer access for metadata entities.
    private final UserMetadataStoreService userMetadataStoreService;
    // Jackson JSON mapper shared across Metacat.
    private final MetacatJson metacatJson;
    // Server configuration properties.
    private final Config config;
    // Hook for intercepting/augmenting metadata reads and writes.
    private final MetadataInterceptor metadataInterceptor;
    /**
     * Ctor.
     *
     * @param userMetadataStoreService The User metadata store service.
     * @param metacatJson The Metacat jackson JSON mapper.
     * @param config The config.
     * @param metadataInterceptor The metadata interceptor.
     */
    @Autowired
    public UserMetadataServiceImpl(final UserMetadataStoreService userMetadataStoreService,
                                   final MetacatJson metacatJson,
                                   final Config config,
                                   final MetadataInterceptor metadataInterceptor) {
        this.userMetadataStoreService = userMetadataStoreService;
        this.metacatJson = metacatJson;
        this.config = config;
        this.metadataInterceptor = metadataInterceptor;
    }
}
| 2,180 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat User metadata classes.
*/
package com.netflix.metacat.metadata;
| 2,181 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/UserMetadataStoreService.java
|
package com.netflix.metacat.metadata.store;
import com.netflix.metacat.metadata.store.data.repositories.DataMetadataRepository;
import com.netflix.metacat.metadata.store.data.repositories.DefinitionMetadataRepository;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
/**
 * Storage interface for user metadata entity operations.
 *
 * <p>Currently only wires the two repositories; entity operations are expected
 * to be added on top of them.
 *
 * @author rveeramacheneni
 */
@Slf4j
public class UserMetadataStoreService {
    // Repository for definition metadata keyed by qualified name.
    private final DefinitionMetadataRepository definitionMetadataRepository;
    // Repository for data metadata keyed by URI.
    private final DataMetadataRepository dataMetadataRepository;
    /**
     * Ctor.
     *
     * @param definitionMetadataRepository The definition metadata repository.
     * @param dataMetadataRepository The data metadata repository.
     */
    @Autowired
    public UserMetadataStoreService(@NonNull final DefinitionMetadataRepository definitionMetadataRepository,
                                    @NonNull final DataMetadataRepository dataMetadataRepository) {
        this.definitionMetadataRepository = definitionMetadataRepository;
        this.dataMetadataRepository = dataMetadataRepository;
    }
}
| 2,182 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata storage-related classes.
*/
package com.netflix.metacat.metadata.store;
| 2,183 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/configs/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* User metadata store config classes.
*/
package com.netflix.metacat.metadata.store.configs;
| 2,184 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/configs/UserMetadataStoreConfig.java
|
package com.netflix.metacat.metadata.store.configs;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.metadata.store.UserMetadataStoreService;
import com.netflix.metacat.metadata.store.data.repositories.DataMetadataRepository;
import com.netflix.metacat.metadata.store.data.repositories.DefinitionMetadataRepository;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
/**
 * Spring configuration wiring up the user metadata store.
 *
 * <p>Scans the {@code store.data} packages for JPA entities and
 * Spring Data repositories.</p>
 *
 * @author rveeramacheneni
 */
@Configuration
@EntityScan("com.netflix.metacat.metadata.store.data.*")
@EnableJpaRepositories("com.netflix.metacat.metadata.store.data.*")
public class UserMetadataStoreConfig {
    /**
     * Creates the user metadata store service backed by the given JPA repositories.
     *
     * @param definitionMetadataRepository The definition metadata repository.
     * @param dataMetadataRepository The data metadata repository.
     * @return the constructed bean.
     */
    @Bean
    public UserMetadataStoreService userMetadataStoreService(
        final DefinitionMetadataRepository definitionMetadataRepository,
        final DataMetadataRepository dataMetadataRepository) {
        final UserMetadataStoreService storeService =
            new UserMetadataStoreService(definitionMetadataRepository, dataMetadataRepository);
        return storeService;
    }

    /**
     * Provides the Metacat JSON handler unless one is already registered in the context.
     *
     * @return The JSON handler
     */
    @Bean
    @ConditionalOnMissingBean(MetacatJson.class)
    public MetacatJson metacatJson() {
        final MetacatJson jsonLocator = new MetacatJsonLocator();
        return jsonLocator;
    }
}
| 2,185 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/converters/QualifiedNameConverter.java
|
package com.netflix.metacat.metadata.store.data.converters;
import com.netflix.metacat.common.QualifiedName;
import lombok.extern.slf4j.Slf4j;
import javax.persistence.AttributeConverter;
import javax.persistence.Converter;
/**
 * JPA attribute converter mapping a {@link QualifiedName} to its String form
 * and back. Applied automatically to all QualifiedName attributes.
 *
 * @author rveeramacheneni
 */
@Slf4j
@Converter(autoApply = true)
@SuppressWarnings("PMD")
public class QualifiedNameConverter implements AttributeConverter<QualifiedName, String> {
    @Override
    public String convertToDatabaseColumn(final QualifiedName attribute) {
        // A null attribute is persisted as SQL NULL.
        if (attribute == null) {
            return null;
        }
        return attribute.toString();
    }

    @Override
    public QualifiedName convertToEntityAttribute(final String dbData) {
        // SQL NULL maps back to a null QualifiedName.
        if (dbData == null) {
            return null;
        }
        return QualifiedName.fromString(dbData);
    }
}
| 2,186 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/converters/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata converter classes.
*/
package com.netflix.metacat.metadata.store.data.converters;
| 2,187 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/converters/ObjectNodeConverter.java
|
package com.netflix.metacat.metadata.store.data.converters;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.json.MetacatJson;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.persistence.AttributeConverter;
import javax.persistence.Converter;
/**
 * JPA attribute converter that persists a Jackson {@link ObjectNode} as its
 * JSON text and parses it back on load. Applied automatically to all
 * ObjectNode attributes.
 *
 * @author rveeramacheneni
 */
@Slf4j
@Converter(autoApply = true)
@SuppressWarnings("PMD")
public class ObjectNodeConverter implements AttributeConverter<ObjectNode, String> {
    private final MetacatJson metacatJson;

    /**
     * Ctor.
     *
     * @param metacatJson the Jackson object mapper.
     */
    public ObjectNodeConverter(@NonNull final MetacatJson metacatJson) {
        this.metacatJson = metacatJson;
    }

    @Override
    public String convertToDatabaseColumn(final ObjectNode attribute) {
        // A null attribute is persisted as SQL NULL rather than the string "null".
        if (attribute == null) {
            return null;
        }
        return attribute.toString();
    }

    @Override
    public ObjectNode convertToEntityAttribute(final String dbData) {
        // SQL NULL maps back to a null ObjectNode; anything else is parsed as JSON.
        if (dbData == null) {
            return null;
        }
        return metacatJson.parseJsonObject(dbData);
    }
}
| 2,188 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/repositories/DefinitionMetadataRepository.java
|
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.data.entities.DefinitionMetadataEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;
import java.util.Optional;
/**
 * The DefinitionMetadata entity repository.
 *
 * <p>Spring Data JPA generates the implementation at runtime; the entity's
 * primary key type is String (UUID).</p>
 */
@Repository
public interface DefinitionMetadataRepository extends JpaRepository<DefinitionMetadataEntity, String> {
/**
 * Find a definition metadata entity using the given QualifiedName.
 *
 * <p>Query is derived from the method name (WHERE name = :name). The name is
 * presumably compared in its converted String column form — TODO confirm
 * converter participation in derived queries.</p>
 *
 * @param name The QualifiedName of the entity.
 * @return The definition metadata entity.
 */
Optional<DefinitionMetadataEntity> findByName(QualifiedName name);
}
| 2,189 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/repositories/DataMetadataRepository.java
|
package com.netflix.metacat.metadata.store.data.repositories;
import com.netflix.metacat.metadata.store.data.entities.DataMetadataEntity;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;
import java.util.Optional;
/**
 * The data metadata entity repository.
 *
 * <p>Spring Data JPA generates the implementation at runtime; the entity's
 * primary key type is String (UUID).</p>
 *
 * @author rveeramacheneni
 */
@Repository
public interface DataMetadataRepository extends JpaRepository<DataMetadataEntity, String> {
/**
 * Find a data metadata entity using the given uri.
 *
 * <p>Query is derived from the method name (WHERE uri = :uri); the uri column
 * is unique, so at most one row matches.</p>
 *
 * @param uri The uri of the entity.
 * @return The data metadata entity.
 */
Optional<DataMetadataEntity> findByUri(String uri);
}
| 2,190 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/repositories/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata repository classes.
*/
package com.netflix.metacat.metadata.store.data.repositories;
| 2,191 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/BaseUserMetadataEntity.java
|
package com.netflix.metacat.metadata.store.data.entities;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.metadata.store.data.converters.ObjectNodeConverter;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import org.hibernate.annotations.ColumnDefault;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.MappedSuperclass;
/**
 * Represents a basic user metadata entity.
 *
 * <p>Mapped superclass adding the soft-delete flag and the JSON payload shared
 * by the concrete user-metadata entities (definition and data metadata).</p>
 *
 * @author rveeramacheneni
 */
@MappedSuperclass
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@ToString(callSuper = true, of = {
"isDeleted"
})
@SuppressWarnings("PMD")
public class BaseUserMetadataEntity extends BaseEntity {
// Soft-delete marker: rows are flagged rather than physically removed (DB default: false).
@Basic
@Column(name = "is_deleted", nullable = false)
@ColumnDefault("false")
protected boolean isDeleted;
// Metadata payload stored in a jsonb column, converted to/from a Jackson ObjectNode.
@Basic
@Column(name = "data", columnDefinition = "jsonb")
@Convert(converter = ObjectNodeConverter.class)
protected ObjectNode data;
}
| 2,192 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/BaseEntity.java
|
package com.netflix.metacat.metadata.store.data.entities;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.MappedSuperclass;
import javax.persistence.Version;
/**
 * Represents a basic metadata entity.
 *
 * <p>Mapped superclass providing the UUID primary key, optimistic-locking
 * version and embedded audit fields for all metadata entities.</p>
 *
 * @author rveeramacheneni
 */
@MappedSuperclass
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(of = "id")
@ToString(of = {
"id",
"version",
"audit"
})
@SuppressWarnings("PMD")
public abstract class BaseEntity {
// Surrogate primary key generated by Hibernate's "uuid2" strategy.
// Setter suppressed: the id is assigned by the persistence provider only.
@Basic
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(name = "id", nullable = false, unique = true, updatable = false)
@Setter(AccessLevel.NONE)
protected String id;
// Optimistic-locking version, maintained by JPA; not settable by callers.
@Version
@Column(name = "version")
@Setter(AccessLevel.NONE)
protected Long version;
// Embedded created/modified audit columns; defaulted so builders never leave it null.
@Embedded
@Builder.Default
protected AuditEntity audit = new AuditEntity();
}
| 2,193 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/AuditEntity.java
|
package com.netflix.metacat.metadata.store.data.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.springframework.data.annotation.CreatedBy;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.LastModifiedBy;
import org.springframework.data.annotation.LastModifiedDate;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import java.time.Instant;
/**
 * Embeddable entity with audit fields.
 *
 * <p>NOTE(review): the Spring Data auditing annotations below only take effect
 * if JPA auditing is enabled elsewhere (e.g. {@code @EnableJpaAuditing} with the
 * auditing entity listener registered) — not visible from this file; confirm.</p>
 *
 * @author rveeramacheneni
 */
@Embeddable
@Getter
@Setter
@Builder
@AllArgsConstructor
@NoArgsConstructor
@ToString(of = {
"createdBy",
"lastModifiedBy",
"createdDate",
"lastModifiedDate"
})
public class AuditEntity {
// Principal that created the row; required at insert time.
@Basic
@Column(name = "created_by", nullable = false)
@CreatedBy
protected String createdBy;
// Principal of the most recent update, if any.
@Basic
@Column(name = "last_updated_by")
@LastModifiedBy
protected String lastModifiedBy;
// Creation timestamp; not updatable after insert.
@Basic
@Column(name = "created_date", updatable = false)
@CreatedDate
protected Instant createdDate;
// Timestamp of the most recent update.
@Basic
@Column(name = "last_updated_date")
@LastModifiedDate
protected Instant lastModifiedDate;
}
| 2,194 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/DefinitionMetadataEntity.java
|
package com.netflix.metacat.metadata.store.data.entities;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.metadata.store.data.converters.QualifiedNameConverter;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Convert;
import javax.persistence.Entity;
import javax.persistence.Table;
/**
 * The definition metadata entity.
 *
 * <p>Stores the JSON payload (inherited from {@link BaseUserMetadataEntity})
 * keyed by the qualified name of the catalog object it describes.</p>
 *
 * @author rveeramacheneni
 */
@Entity
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@ToString(of = {
"name"
})
@Table(name = "definition_metadata")
@SuppressWarnings("PMD")
public class DefinitionMetadataEntity extends BaseUserMetadataEntity {
// Unique qualified name of the described object, persisted as a String
// via QualifiedNameConverter.
@Basic
@Column(name = "name", nullable = false, unique = true)
@Convert(converter = QualifiedNameConverter.class)
private QualifiedName name;
}
| 2,195 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/DataMetadataEntity.java
|
package com.netflix.metacat.metadata.store.data.entities;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.SuperBuilder;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Table;
/**
 * Represents a data metadata entity.
 *
 * <p>Stores the JSON payload (inherited from {@link BaseUserMetadataEntity})
 * keyed by the uri of the data it describes.</p>
 *
 * @author rveeramacheneni
 */
@Entity
@Getter
@Setter
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@ToString(of = {
"uri"
})
@Table(name = "data_metadata")
public class DataMetadataEntity extends BaseUserMetadataEntity {
// Unique uri identifying the data this metadata row describes.
@Basic
@Column(name = "uri", nullable = false, unique = true)
private String uri;
}
| 2,196 |
0 |
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data
|
Create_ds/metacat/metacat-metadata/src/main/java/com/netflix/metacat/metadata/store/data/entities/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Metacat Metadata entity classes.
*/
package com.netflix.metacat.metadata.store.data.entities;
| 2,197 |
0 |
Create_ds/crunch/crunch-spark/src/test/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-spark/src/test/java/org/apache/crunch/impl/spark/AvroByteArrayTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.spark;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Field.Order;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.crunch.impl.spark.serde.AvroSerDe;
import org.apache.crunch.types.avro.Avros;
import org.codehaus.jackson.node.JsonNodeFactory;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * Verifies that Avro fields with sort order {@code IGNORE} affect neither
 * record equality nor the serialized bytes produced by {@link AvroSerDe}.
 */
public class AvroByteArrayTest {
    @Test
    public void fieldsWithIgnoredSortOrderAreNotUsedInEquals() throws Exception {
        // Two string fields: field1 participates in ordering, field2 is ignored.
        Schema schema = Schema.createRecord("foo", "", "", false);
        schema.setFields(Lists.newArrayList(
            new Field("field1",
                Schema.create(Type.STRING),
                null,
                JsonNodeFactory.instance.textNode(""),
                Order.ASCENDING),
            new Field("field2",
                Schema.create(Type.STRING),
                null,
                JsonNodeFactory.instance.textNode(""),
                Order.IGNORE)));

        GenericRecordBuilder builder = new GenericRecordBuilder(schema);
        Record firstRecord = builder.set("field1", "hello").set("field2", "world").build();
        Record secondRecord = builder.set("field1", "hello").set("field2", "there").build();

        // Avro equality skips fields whose sort order is IGNORE.
        assertEquals(firstRecord, secondRecord);

        // The serialized representation must therefore also be identical.
        AvroSerDe serde = new AvroSerDe(Avros.generics(schema), null);
        assertEquals(serde.toBytes(firstRecord), serde.toBytes(secondRecord));
    }
}
| 2,198 |
0 |
Create_ds/crunch/crunch-spark/src/it/java/org/apache
|
Create_ds/crunch/crunch-spark/src/it/java/org/apache/crunch/SparkAvroParquetPipelineIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.impl.spark.SparkPipeline;
import org.apache.crunch.io.At;
import org.apache.crunch.io.parquet.AvroParquetFileSource;
import org.apache.crunch.io.parquet.AvroParquetFileSourceTarget;
import org.apache.crunch.io.parquet.AvroParquetFileTarget;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.test.Employee;
import org.apache.crunch.test.Person;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.fs.Path;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.apache.parquet.avro.AvroParquetReader;
import org.apache.parquet.avro.AvroParquetWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
 * Integration tests for reading and writing Avro records as Parquet files
 * through a {@link SparkPipeline}.
 */
public class SparkAvroParquetPipelineIT implements Serializable {
    // Input file populated by each test before the pipeline reads it.
    private transient File avroFile;

    @Rule
    public transient TemporaryPath tmpDir = new TemporaryPath(RuntimeParameters.TMP_DIR, "hadoop.tmp.dir");

    @Before
    public void setUp() throws IOException {
        avroFile = tmpDir.getFile("test.avro.parquet");
    }

    /**
     * Writes the given records to {@link #avroFile} as an Avro container file.
     *
     * @param genericRecords the records to write.
     * @param schema the Avro schema of the records.
     * @throws IOException if the file cannot be written.
     */
    private void populateGenericFile(List<GenericRecord> genericRecords, Schema schema) throws IOException {
        // Fix: use try-with-resources so the stream and writer are closed even
        // if create/append throws (previously they leaked on failure).
        try (FileOutputStream outputStream = new FileOutputStream(this.avroFile);
             DataFileWriter<GenericRecord> dataFileWriter =
                 new DataFileWriter<GenericRecord>(new GenericDatumWriter<GenericRecord>(schema))) {
            dataFileWriter.create(schema, outputStream);
            for (GenericRecord record : genericRecords) {
                dataFileWriter.append(record);
            }
        }
    }

    /**
     * Writes the given records to {@link #avroFile} as a Parquet file.
     *
     * @param genericRecords the records to write.
     * @param schema the Avro schema of the records.
     * @throws IOException if the file cannot be written.
     */
    private void populateGenericParquetFile(List<GenericRecord> genericRecords, Schema schema) throws IOException {
        // Fix: use try-with-resources so the writer is closed even if a write
        // throws (previously it leaked on failure).
        try (AvroParquetWriter<GenericRecord> writer = new AvroParquetWriter<GenericRecord>(
                new Path(avroFile.getPath()), schema)) {
            for (GenericRecord record : genericRecords) {
                writer.write(record);
            }
        }
    }

    /**
     * Round-trips one Person from an Avro source to a Parquet target and
     * verifies the record read back from the Parquet output.
     */
    @Test
    public void toAvroParquetFileTarget() throws Exception {
        GenericRecord savedRecord = new GenericData.Record(Person.SCHEMA$);
        savedRecord.put("name", "John Doe");
        savedRecord.put("age", 42);
        savedRecord.put("siblingnames", Lists.newArrayList("Jimmy", "Jane"));
        populateGenericFile(Lists.newArrayList(savedRecord), Person.SCHEMA$);

        Pipeline pipeline = new SparkPipeline("local", "avroparq");
        PCollection<Person> genericCollection = pipeline.read(At.avroFile(avroFile.getAbsolutePath(),
            Avros.records(Person.class)));
        File outputFile = tmpDir.getFile("output");
        Target parquetFileTarget = new AvroParquetFileTarget(outputFile.getAbsolutePath());
        pipeline.write(genericCollection, parquetFileTarget);
        pipeline.run();

        Person person = genericCollection.materialize().iterator().next();
        Path parquetFile = new Path(new File(outputFile, "part-r-00000.parquet").getPath());
        AvroParquetReader<Person> reader = new AvroParquetReader<Person>(parquetFile);
        try {
            Person readPerson = reader.read();
            assertThat(readPerson, is(person));
        } finally {
            reader.close();
            pipeline.done();
        }
    }

    /**
     * Same as {@link #toAvroParquetFileTarget()} but the input is itself a
     * Parquet file rather than an Avro container file.
     */
    @Test
    public void toAvroParquetFileTargetFromParquet() throws Exception {
        GenericRecord savedRecord = new GenericData.Record(Person.SCHEMA$);
        savedRecord.put("name", "John Doe");
        savedRecord.put("age", 42);
        savedRecord.put("siblingnames", Lists.newArrayList("Jimmy", "Jane"));
        populateGenericParquetFile(Lists.newArrayList(savedRecord), Person.SCHEMA$);

        Pipeline pipeline = new SparkPipeline("local", "avroparq");
        PCollection<Person> genericCollection = pipeline.read(
            new AvroParquetFileSource<Person>(new Path(avroFile.getAbsolutePath()), Avros.records(Person.class)));
        File outputFile = tmpDir.getFile("output");
        Target parquetFileTarget = new AvroParquetFileTarget(outputFile.getAbsolutePath());
        pipeline.write(genericCollection, parquetFileTarget);
        pipeline.run();

        Person person = genericCollection.materialize().iterator().next();
        Path parquetFile = new Path(new File(outputFile, "part-r-00000.parquet").getPath());
        AvroParquetReader<Person> reader = new AvroParquetReader<Person>(parquetFile);
        try {
            Person readPerson = reader.read();
            assertThat(readPerson, is(person));
        } finally {
            reader.close();
            pipeline.done();
        }
    }

    /**
     * Writes the same input to two Parquet targets (Person and a derived
     * Employee collection) and verifies both outputs.
     */
    @Test
    public void toAvroParquetFileMultipleTarget() throws Exception {
        GenericRecord savedRecord = new GenericData.Record(Person.SCHEMA$);
        savedRecord.put("name", "John Doe");
        savedRecord.put("age", 42);
        savedRecord.put("siblingnames", Lists.newArrayList("Jimmy", "Jane"));
        populateGenericFile(Lists.newArrayList(savedRecord), Person.SCHEMA$);

        Pipeline pipeline = new SparkPipeline("local", "avroparq");
        PCollection<Person> genericCollection = pipeline.read(At.avroFile(avroFile.getAbsolutePath(),
            Avros.records(Person.class)));
        PCollection<Employee> employees = genericCollection.parallelDo(new DoFn<Person, Employee>() {
            @Override
            public void process(Person person, Emitter<Employee> emitter) {
                emitter.emit(new Employee(person.getName(), 0, "Eng"));
            }
        }, Avros.records(Employee.class));

        File output1File = tmpDir.getFile("output1");
        File output2File = tmpDir.getFile("output2");
        pipeline.write(genericCollection, new AvroParquetFileTarget(output1File.getAbsolutePath()));
        pipeline.write(employees, new AvroParquetFileSourceTarget(new Path(output2File.getAbsolutePath()),
            Avros.records(Employee.class)));
        pipeline.run();

        Person person = genericCollection.materialize().iterator().next();
        Employee employee = employees.materialize().iterator().next();
        Path parquet1File = new Path(new File(output1File, "part-r-00000.parquet").getPath());
        Path parquet2File = new Path(new File(output2File, "part-r-00000.parquet").getPath());
        AvroParquetReader<Person> personReader = new AvroParquetReader<Person>(parquet1File);
        try {
            Person readPerson = personReader.read();
            assertThat(readPerson, is(person));
        } finally {
            personReader.close();
            pipeline.done();
        }
        AvroParquetReader<Employee> employeeReader = new AvroParquetReader<Employee>(parquet2File);
        try {
            Employee readEmployee = employeeReader.read();
            assertThat(readEmployee, is(employee));
        } finally {
            employeeReader.close();
        }
    }

    /**
     * Writes a Parquet target and then reads it back through a Crunch
     * {@link AvroParquetFileSource} as well as a raw AvroParquetReader.
     */
    @Test
    public void toAvroParquetFileTargetReadSource() throws Exception {
        GenericRecord savedRecord = new GenericData.Record(Person.SCHEMA$);
        savedRecord.put("name", "John Doe");
        savedRecord.put("age", 42);
        savedRecord.put("siblingnames", Lists.newArrayList("Jimmy", "Jane"));
        populateGenericFile(Lists.newArrayList(savedRecord), Person.SCHEMA$);

        Pipeline pipeline = new SparkPipeline("local", "avroparq");
        PCollection<Person> genericCollection = pipeline.read(At.avroFile(avroFile.getAbsolutePath(),
            Avros.records(Person.class)));
        File outputFile = tmpDir.getFile("output");
        Target parquetFileTarget = new AvroParquetFileTarget(outputFile.getAbsolutePath());
        pipeline.write(genericCollection, parquetFileTarget);
        pipeline.run();

        Person person = genericCollection.materialize().iterator().next();
        PCollection<Person> retrievedPeople = pipeline.read(new AvroParquetFileSource<Person>(
            new Path(outputFile.toURI()), Avros.records(Person.class)));
        Person retrievedPerson = retrievedPeople.materialize().iterator().next();
        assertThat(retrievedPerson, is(person));

        Path parquetFile = new Path(new File(outputFile, "part-r-00000.parquet").getPath());
        AvroParquetReader<Person> reader = new AvroParquetReader<Person>(parquetFile);
        try {
            Person readPerson = reader.read();
            assertThat(readPerson, is(person));
        } finally {
            reader.close();
            pipeline.done();
        }
    }
}
| 2,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.