index (int64: 0 to 0) | repo_id (stringlengths: 9 to 205) | file_path (stringlengths: 31 to 246) | content (stringlengths: 1 to 12.2M) | __index_level_0__ (int64: 0 to 10k)
---|---|---|---|---|
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import org.springframework.context.ApplicationEvent;
import javax.annotation.Nonnull;
/**
* Event within the Metacat JVM.
*
* @author amajumdar
* @author tgianos
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatEvent extends ApplicationEvent {
private final QualifiedName name;
private final MetacatRequestContext requestContext;
/**
* Constructor.
*
* @param name The qualified name of the resource this event pertains to
* @param requestContext The request context that triggered this event
* @param source The source object this event was generated from
*/
public MetacatEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source
) {
super(source);
this.name = name;
this.requestContext = requestContext;
}
}
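Because MetacatEvent extends Spring's ApplicationEvent, it can be published and consumed through the standard Spring eventing machinery. A minimal sketch, assuming a Spring context; the listener bean below (SampleMetacatEventLogger) is hypothetical and not part of the repository:
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;
// Hypothetical listener bean: any @EventListener method with a MetacatEvent
// parameter receives events published through the application context.
@Component
public class SampleMetacatEventLogger {
    @EventListener
    public void onMetacatEvent(final MetacatEvent event) {
        // name and requestContext are exposed by the Lombok @Getter annotation.
        System.out.println("Metacat event for " + event.getName()
            + " in request " + event.getRequestContext());
    }
}
On the publishing side, with an injected org.springframework.context.ApplicationEventPublisher named publisher, the call would be publisher.publishEvent(new MetacatEvent(name, requestContext, this)).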
| 2,000 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePreEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
/**
* Pre database rename event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatRenameDatabasePreEvent extends MetacatEvent {
/**
* Constructor.
*
* @param name name
* @param requestContext context
* @param source The source object which threw this event
*/
public MetacatRenameDatabasePreEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source
) {
super(name, requestContext, source);
}
}
| 2,001 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePostEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
/**
* Post database rename event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatRenameDatabasePostEvent extends MetacatEvent {
/**
* Constructor.
*
* @param name name
* @param requestContext context
* @param source The source object which threw this event
*/
public MetacatRenameDatabasePostEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source
) {
super(name, requestContext, source);
}
}
| 2,002 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePreEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
/**
* Pre table delete event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatDeleteTablePreEvent extends MetacatEvent {
/**
* Constructor.
*
* @param name name
* @param requestContext context
* @param source The source object which threw this event
*/
public MetacatDeleteTablePreEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source
) {
super(name, requestContext, source);
}
}
| 2,003 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEventListenerFactory.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.common.server.events;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.EventListenerFactory;
import java.lang.reflect.Method;
/**
* This class overrides the DefaultEventListenerFactory in the Spring container.
*
* @author amajumdar
* @since 1.2.x
*/
public class MetacatEventListenerFactory implements EventListenerFactory {
@Override
public boolean supportsMethod(final Method method) {
return true;
}
@Override
public ApplicationListener<?> createApplicationListener(final String beanName,
final Class<?> type,
final Method method) {
return new MetacatApplicationListenerMethodAdapter(beanName, type, method);
}
}
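Spring's EventListenerMethodProcessor collects every EventListenerFactory bean in the context and uses the first one, in order, that supports a given @EventListener method; since supportsMethod here always returns true, this factory claims all of them and wraps each listener in the MetacatApplicationListenerMethodAdapter (not shown in this section). A minimal registration sketch, with a hypothetical configuration class name and bean-ordering concerns elided:
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
// Hypothetical configuration: declaring the factory as a bean makes Spring
// consider it when creating listener adapters for @EventListener methods.
@Configuration
public class MetacatEventConfig {
    @Bean
    public MetacatEventListenerFactory metacatEventListenerFactory() {
        return new MetacatEventListenerFactory();
    }
}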
| 2,004 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPreEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
/**
* Pre delete view event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatDeleteMViewPreEvent extends MetacatEvent {
/**
* Constructor.
*
* @param name name
* @param requestContext context
* @param source The source object which threw this event
*/
public MetacatDeleteMViewPreEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source
) {
super(name, requestContext, source);
}
}
| 2,005 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePreEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
/**
* Pre table update event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatUpdateTablePreEvent extends MetacatEvent {
private final TableDto currentTable;
private final TableDto oldTable;
/**
* Constructor.
*
* @param name name
* @param requestContext context
* @param source The source object which threw this event
* @param oldTable old table info
* @param currentTable new table info
*/
public MetacatUpdateTablePreEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source,
@Nonnull @NonNull final TableDto oldTable,
@Nonnull @NonNull final TableDto currentTable
) {
super(name, requestContext, source);
this.oldTable = oldTable;
this.currentTable = currentTable;
}
}
| 2,006 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPostEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
import java.util.Collections;
import java.util.List;
/**
* Post table partition save event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatSaveTablePartitionPostEvent extends MetacatEvent {
private final List<PartitionDto> partitions;
private final PartitionsSaveResponseDto partitionsSaveResponse;
/**
* Constructor.
*
* @param name name
* @param metacatRequestContext context
* @param source The source object which threw this event
* @param partitions partitions
* @param partitionsSaveResponse response
*/
public MetacatSaveTablePartitionPostEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext metacatRequestContext,
@Nonnull @NonNull final Object source,
@Nonnull @NonNull final List<PartitionDto> partitions,
@Nonnull @NonNull final PartitionsSaveResponseDto partitionsSaveResponse
) {
super(name, metacatRequestContext, source);
this.partitions = Collections.unmodifiableList(partitions);
this.partitionsSaveResponse = partitionsSaveResponse;
}
}
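Note that the constructor stores the partition list behind Collections.unmodifiableList, so listeners only ever see a read-only view. A small sketch of the resulting behavior; the event argument would come from the publishing service, and a public no-arg PartitionDto constructor is assumed:
import com.netflix.metacat.common.dto.PartitionDto;
import java.util.List;
// Hypothetical check: mutating the partitions exposed by the event fails.
final class PartitionEventSketch {
    static void demonstrateReadOnlyPartitions(final MetacatSaveTablePartitionPostEvent event) {
        final List<PartitionDto> partitions = event.getPartitions();
        partitions.add(new PartitionDto()); // throws UnsupportedOperationException
    }
}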
| 2,007 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes metacat events.
*
* @author amajumdar
*/
package com.netflix.metacat.common.server.events;
| 2,008 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPreEvent.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.events;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nonnull;
/**
* Pre view update event.
*/
@Getter
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
public class MetacatUpdateMViewPreEvent extends MetacatEvent {
private final TableDto table;
/**
* Constructor.
*
* @param name name
* @param requestContext context
* @param source The source object which threw this event
* @param table table info
*/
public MetacatUpdateMViewPreEvent(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final MetacatRequestContext requestContext,
@Nonnull @NonNull final Object source,
@Nonnull @NonNull final TableDto table
) {
super(name, requestContext, source);
this.table = table;
}
}
| 2,009 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorModule.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.datastax.driver.core.Cluster;
import com.google.common.collect.ImmutableList;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Scopes;
import com.google.inject.Singleton;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Map;
/**
* A Guice Module for the CassandraConnector.
*
* @author tgianos
* @since 1.0.0
*/
public class CassandraConnectorModule extends AbstractModule {
private static final String CONTACT_POINTS_KEY = "cassandra.contactPoints";
private static final String PORT_KEY = "cassandra.port";
private static final String USERNAME_KEY = "cassandra.username";
private static final String PASSWORD_KEY = "cassandra.password";
private final String catalogShardName;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param catalogShardName catalog shard name
* @param configuration connector configuration
*/
CassandraConnectorModule(
@Nonnull @NonNull final String catalogShardName,
@Nonnull @NonNull final Map<String, String> configuration
) {
this.catalogShardName = catalogShardName;
this.configuration = configuration;
}
/**
* {@inheritDoc}
*/
@Override
protected void configure() {
this.bind(CassandraTypeConverter.class).toInstance(new CassandraTypeConverter());
this.bind(CassandraExceptionMapper.class).toInstance(new CassandraExceptionMapper());
this.bind(ConnectorDatabaseService.class)
.to(ConnectorUtils.getDatabaseServiceClass(this.configuration, CassandraConnectorDatabaseService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorTableService.class)
.to(ConnectorUtils.getTableServiceClass(this.configuration, CassandraConnectorTableService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorPartitionService.class)
.to(ConnectorUtils.getPartitionServiceClass(this.configuration, CassandraConnectorPartitionService.class))
.in(Scopes.SINGLETON);
}
/**
* Creates a cluster to use for connecting to Cassandra.
*
* @return The cluster singleton to use within the Injector
*/
@Provides
@Singleton
Cluster provideCluster() {
final Cluster.Builder builder = Cluster.builder().withClusterName(this.catalogShardName);
// Contact points are required
final String contactPointsString = this.configuration.get(CONTACT_POINTS_KEY);
if (contactPointsString == null) {
throw new IllegalArgumentException(CONTACT_POINTS_KEY + " value is missing and is required.");
}
final String[] contactPoints = contactPointsString.split(",");
final ImmutableList.Builder<InetAddress> contactAddresses = ImmutableList.builder();
for (final String contactPoint : contactPoints) {
try {
contactAddresses.add(InetAddress.getByName(contactPoint));
} catch (final UnknownHostException uhe) {
throw new IllegalArgumentException("Can't parse contact point " + contactPoint, uhe);
}
}
builder.addContactPoints(contactAddresses.build());
final String port = this.configuration.get(PORT_KEY);
if (port != null) {
builder.withPort(Integer.parseInt(port));
}
final String username = this.configuration.get(USERNAME_KEY);
final String password = this.configuration.get(PASSWORD_KEY);
if (username != null && password != null) {
builder.withCredentials(username, password);
}
return builder.build();
}
}
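The module reads its settings from the configuration map passed to the constructor: cassandra.contactPoints is required (comma-separated hosts), while cassandra.port and the cassandra.username/cassandra.password pair are optional. A hedged sketch of standing up the module, assuming same-package access (the constructor is package-private) and using placeholder hosts and credentials:
import com.datastax.driver.core.Cluster;
import com.google.inject.Guice;
import com.google.inject.Injector;
import java.util.HashMap;
import java.util.Map;
// Illustrative only; the values below are placeholders, not real endpoints.
public final class CassandraModuleSketch {
    public static void main(final String[] args) {
        final Map<String, String> configuration = new HashMap<>();
        configuration.put("cassandra.contactPoints", "10.0.0.1,10.0.0.2"); // required
        configuration.put("cassandra.port", "9042");                       // optional
        configuration.put("cassandra.username", "metacat");                // optional, paired with password
        configuration.put("cassandra.password", "secret");
        final Injector injector = Guice.createInjector(
            new CassandraConnectorModule("my-catalog-shard", configuration));
        final Cluster cluster = injector.getInstance(Cluster.class); // singleton from provideCluster()
    }
}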
| 2,010 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import lombok.Getter;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Abstract class for common Cassandra methods based around the Cluster.
*
* @author tgianos
* @since 1.0.0
*/
@Getter
abstract class CassandraService {
private final Cluster cluster;
private final CassandraExceptionMapper exceptionMapper;
CassandraService(
@Nonnull @NonNull final Cluster cluster,
@Nonnull @NonNull final CassandraExceptionMapper exceptionMapper
) {
this.cluster = cluster;
this.exceptionMapper = exceptionMapper;
}
/**
* Execute a query on the Cassandra cluster pointed to by the given Cluster configuration.
*
* @param query The query to execute
* @return The query results if any
* @throws com.datastax.driver.core.exceptions.NoHostAvailableException if no host in the cluster can be contacted successfully to execute this query.
* @throws com.datastax.driver.core.exceptions.QueryExecutionException if the query triggered an execution exception, i.e. an exception thrown by Cassandra when it cannot execute the query with the requested consistency level successfully.
* @throws com.datastax.driver.core.exceptions.QueryValidationException if the query is invalid (syntax error, unauthorized or any other validation problem).
*/
ResultSet executeQuery(@Nonnull @NonNull final String query) {
try (final Session session = this.cluster.connect()) {
// From documentation it doesn't look like ResultSet needs to be closed
return session.execute(query);
}
}
}
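executeQuery opens a fresh Session per call via cluster.connect() and closes it when the try-with-resources block exits, so callers should consume results with that lifecycle in mind. A hypothetical same-package subclass (CassandraService is package-private), not part of the repository:
import com.datastax.driver.core.Cluster;
// Illustrative subclass: issues a single-row query through executeQuery().
final class ExampleCassandraService extends CassandraService {
    ExampleCassandraService(final Cluster cluster, final CassandraExceptionMapper exceptionMapper) {
        super(cluster, exceptionMapper);
    }
    long countRows(final String keyspace, final String table) {
        // COUNT returns a single row in the first page, which the driver has
        // already fetched by the time the session is closed.
        return this.executeQuery("SELECT COUNT(*) FROM " + keyspace + "." + table + ";")
            .one()
            .getLong(0);
    }
}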
| 2,011 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorTableService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.TableMetadata;
import com.datastax.driver.core.exceptions.DriverException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import java.util.Comparator;
import java.util.List;
/**
* Cassandra implementation of the ConnectorTableService.
*
* @author tgianos
* @see ConnectorTableService
* @since 1.0.0
*/
@Slf4j
public class CassandraConnectorTableService extends CassandraService implements ConnectorTableService {
private final CassandraTypeConverter typeConverter;
/**
* Constructor.
*
* @param cluster The cluster this service connects to
* @param exceptionMapper The exception mapper to use
* @param typeConverter The type converter to convert from CQL types to Metacat Types
*/
@Inject
public CassandraConnectorTableService(
@Nonnull @NonNull final Cluster cluster,
@Nonnull @NonNull final CassandraExceptionMapper exceptionMapper,
@Nonnull @NonNull final CassandraTypeConverter typeConverter
) {
super(cluster, exceptionMapper);
this.typeConverter = typeConverter;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name) {
final String keyspace = name.getDatabaseName();
final String table = name.getTableName();
log.debug("Attempting to delete Cassandra table {}.{} for request {}", keyspace, table, context);
try {
this.executeQuery("USE " + keyspace + "; DROP TABLE IF EXISTS " + table + ";");
log.debug("Successfully deleted Cassandra table {}.{} for request {}", keyspace, table, context);
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name) {
final String keyspace = name.getDatabaseName();
final String table = name.getTableName();
log.debug("Attempting to get metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
try {
final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
if (keyspaceMetadata == null) {
throw new DatabaseNotFoundException(name);
}
final TableMetadata tableMetadata = keyspaceMetadata.getTable(table);
if (tableMetadata == null) {
throw new TableNotFoundException(name);
}
final TableInfo tableInfo = this.getTableInfo(name, tableMetadata);
log.debug("Successfully got metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
return tableInfo;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name
) {
final String keyspace = name.getDatabaseName();
final String table = name.getTableName();
log.debug("Checking if Cassandra table {}.{} exists for request {}", keyspace, table, context);
try {
final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
if (keyspaceMetadata == null) {
return false;
}
final boolean exists = keyspaceMetadata.getTable(table) != null;
log.debug(
"Cassandra table {}.{} {} for request {}",
keyspace,
table,
exists ? "exists" : "doesn't exist",
context
);
return exists;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String keyspace = name.getDatabaseName();
log.debug("Attempting to list tables in Cassandra keyspace {} for request {}", keyspace, context);
try {
final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
if (keyspaceMetadata == null) {
throw new DatabaseNotFoundException(name);
}
// TODO: Should we include views?
final List<TableInfo> tables = Lists.newArrayList();
for (final TableMetadata tableMetadata : keyspaceMetadata.getTables()) {
if (prefix != null && !tableMetadata.getName().startsWith(prefix.getTableName())) {
continue;
}
tables.add(this.getTableInfo(name, tableMetadata));
}
// Sort
if (sort != null) {
final Comparator<TableInfo> tableComparator = Comparator.comparing((t) -> t.getName().getTableName());
ConnectorUtils.sort(tables, sort, tableComparator);
}
// Paging
final List<TableInfo> pagedTables = ConnectorUtils.paginate(tables, pageable);
log.debug(
"Listed {} tables in Cassandra keyspace {} for request {}",
pagedTables.size(),
keyspace,
context
);
return pagedTables;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String catalog = name.getCatalogName();
final String keyspace = name.getDatabaseName();
log.debug("Attempting to list table names in Cassandra keyspace {} for request {}", keyspace, context);
try {
final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
if (keyspaceMetadata == null) {
throw new DatabaseNotFoundException(name);
}
// TODO: Should we include views?
final List<QualifiedName> tableNames = Lists.newArrayList();
for (final TableMetadata tableMetadata : keyspaceMetadata.getTables()) {
final String tableName = tableMetadata.getName();
if (prefix != null && !tableName.startsWith(prefix.getTableName())) {
continue;
}
tableNames.add(QualifiedName.ofTable(catalog, keyspace, tableName));
}
// Sort
if (sort != null) {
final Comparator<QualifiedName> tableNameComparator = Comparator.comparing(QualifiedName::getTableName);
ConnectorUtils.sort(tableNames, sort, tableNameComparator);
}
// Paging
final List<QualifiedName> paged = ConnectorUtils.paginate(tableNames, pageable);
log.debug("Listed {} table names in Cassandra keyspace {} for request {}", paged.size(), keyspace, context);
return paged;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
private TableInfo getTableInfo(
@Nonnull @NonNull final QualifiedName name,
@Nonnull @NonNull final TableMetadata tableMetadata
) {
final ImmutableList.Builder<FieldInfo> fieldInfoBuilder = ImmutableList.builder();
// TODO: Ignores clustering, primary key, index, etc columns. We need to rework TableInfo to support
for (final ColumnMetadata column : tableMetadata.getColumns()) {
final String dataType = column.getType().toString();
fieldInfoBuilder.add(
FieldInfo.builder()
.name(column.getName())
.sourceType(dataType)
.type(this.typeConverter.toMetacatType(dataType))
.build()
);
}
return TableInfo.builder()
.name(QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableMetadata.getName()))
.fields(fieldInfoBuilder.build())
.build();
}
}
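Putting the pieces together, a table lookup through this service might read as follows; a sketch with placeholder names, where the ConnectorRequestContext instance would be supplied by the Metacat framework:
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
// Hypothetical helper: fetch metadata for one table through the service.
final class TableLookupSketch {
    static TableInfo describeTable(final CassandraConnectorTableService service,
                                   final ConnectorRequestContext context) {
        return service.get(context, QualifiedName.ofTable("cassandra", "my_keyspace", "my_table"));
    }
}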
| 2,012 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorPartitionService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
/**
* A default implementation of this interface that simply inherits the interface's default method
* implementations, as Cassandra doesn't have partitions.
*
* @author tgianos
* @see ConnectorPartitionService
* @since 1.0.0
*/
public class CassandraConnectorPartitionService implements ConnectorPartitionService {
}
| 2,013 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.datastax.driver.core.exceptions.AlreadyExistsException;
import com.datastax.driver.core.exceptions.DriverException;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Convert Cassandra driver exceptions to connector exceptions.
*
* @author tgianos
* @see com.datastax.driver.core.exceptions.DriverException
* @see com.netflix.metacat.common.server.connectors.exception.ConnectorException
* @since 1.0.0
*/
public class CassandraExceptionMapper {
/**
* Convert the given Cassandra driver exception to a corresponding ConnectorException if possible, otherwise
* return a generic ConnectorException.
*
* @param de The Cassandra driver exception
* @param name The fully qualified name of the resource that was being accessed or modified at the time of the error
* @return A connector exception wrapping the DriverException
*/
public ConnectorException toConnectorException(
@Nonnull @NonNull final DriverException de,
@Nonnull @NonNull final QualifiedName name
) {
if (de instanceof AlreadyExistsException) {
final AlreadyExistsException ae = (AlreadyExistsException) de;
if (ae.wasTableCreation()) {
return new TableAlreadyExistsException(name, ae);
} else {
return new DatabaseAlreadyExistsException(name, ae);
}
} else {
return new ConnectorException(de.getMessage(), de);
}
}
}
| 2,014 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.MaterializedViewMetadata;
import com.datastax.driver.core.exceptions.DriverException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import java.util.Comparator;
import java.util.List;
/**
* Implementation of the database service for Cassandra. For Cassandra the {@code Keyspace} is the equivalent of a JDBC
* database.
*
* @author tgianos
* @see ConnectorDatabaseService
* @since 1.0.0
*/
@Slf4j
public class CassandraConnectorDatabaseService extends CassandraService implements ConnectorDatabaseService {
/**
* Constructor.
*
* @param cluster The cassandra cluster connection to use
* @param exceptionMapper The exception mapper to use to convert from DriverException to ConnectorException
*/
@Inject
public CassandraConnectorDatabaseService(
@Nonnull @NonNull final Cluster cluster,
@Nonnull @NonNull final CassandraExceptionMapper exceptionMapper
) {
super(cluster, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
public void create(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final DatabaseInfo resource
) {
final String keyspace = resource.getName().getDatabaseName();
log.debug("Attempting to create a Cassandra Keyspace named {} for request {}", keyspace, context);
try {
// TODO: Make this take parameters for replication and the class
this.executeQuery(
"CREATE KEYSPACE "
+ keyspace
+ " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};"
);
log.debug("Successfully created Cassandra Keyspace named {} for request {}", keyspace, context);
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, resource.getName());
}
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name) {
final String keyspace = name.getDatabaseName();
log.debug("Attempting to drop Cassandra keyspace {} for request {}", keyspace, context);
try {
this.executeQuery("DROP KEYSPACE IF EXISTS " + keyspace + ";");
log.debug("Successfully dropped {} keyspace", keyspace);
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public DatabaseInfo get(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name
) {
final String keyspace = name.getDatabaseName();
log.debug("Attempting to get keyspace metadata for keyspace {} for request {}", keyspace, context);
try {
final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
if (keyspaceMetadata == null) {
throw new DatabaseNotFoundException(name);
}
log.debug("Successfully found the keyspace metadata for {} for request {}", name, context);
return DatabaseInfo.builder().name(name).build();
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listViewNames(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName databaseName
) {
final String catalogName = databaseName.getCatalogName();
final String keyspace = databaseName.getDatabaseName();
log.debug("Attempting to get materialized view names for keyspace {} due to request {}", keyspace, context);
try {
final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
if (keyspaceMetadata == null) {
throw new DatabaseNotFoundException(databaseName);
}
final ImmutableList.Builder<QualifiedName> viewsBuilder = ImmutableList.builder();
for (final MaterializedViewMetadata view : keyspaceMetadata.getMaterializedViews()) {
viewsBuilder.add(
QualifiedName.ofView(catalogName, keyspace, view.getBaseTable().getName(), view.getName())
);
}
final List<QualifiedName> views = viewsBuilder.build();
log.debug("Successfully found {} views for keyspace {} due to request {}", views.size(), keyspace, context);
return views;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, databaseName);
}
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name
) {
final String keyspace = name.getDatabaseName();
log.debug("Checking if keyspace {} exists for request {}", keyspace, context);
try {
final boolean exists = this.getCluster().getMetadata().getKeyspace(keyspace) != null;
log.debug("Keyspace {} {} for request {}", keyspace, exists ? "exists" : "doesn't exist", context);
return exists;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public List<DatabaseInfo> list(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
log.debug("Attempting to list keyspaces for request {}", context);
final ImmutableList.Builder<DatabaseInfo> keyspacesBuilder = ImmutableList.builder();
for (final QualifiedName keyspace : this.listNames(context, name, prefix, sort, pageable)) {
keyspacesBuilder.add(DatabaseInfo.builder().name(keyspace).build());
}
final List<DatabaseInfo> keyspaces = keyspacesBuilder.build();
log.debug("Successfully listed {} keyspaces for request {}", keyspaces.size(), context);
return keyspaces;
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(
@Nonnull @NonNull final ConnectorRequestContext context,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
log.debug("Attempting to list keyspaces for request {}", context);
try {
final List<QualifiedName> names = Lists.newArrayList();
for (final KeyspaceMetadata keyspace : this.getCluster().getMetadata().getKeyspaces()) {
final String keyspaceName = keyspace.getName();
if (prefix != null && !keyspaceName.startsWith(prefix.getDatabaseName())) {
continue;
}
names.add(QualifiedName.ofDatabase(name.getCatalogName(), keyspaceName));
}
if (sort != null) {
// We can only really sort by the database name at this level so ignore SortBy field
final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getDatabaseName);
ConnectorUtils.sort(names, sort, comparator);
}
final List<QualifiedName> results = ConnectorUtils.paginate(names, pageable);
log.debug("Finished listing keyspaces for request {}", context);
return results;
} catch (final DriverException de) {
log.error(de.getMessage(), de);
throw this.getExceptionMapper().toConnectorException(de, name);
}
}
}
| 2,015 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Cassandra Connector Plugin.
*
* @author tgianos
* @since 1.0.0
*/
public class CassandraConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "cassandra";
private static final CassandraTypeConverter TYPE_CONVERTER = new CassandraTypeConverter();
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new CassandraConnectorFactory(connectorContext.getCatalogName(),
connectorContext.getCatalogShardName(), connectorContext.getConfiguration());
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
}
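The plugin is the entry point through which the Metacat server discovers the connector by its type string and builds a factory for a catalog. A minimal sketch; the ConnectorContext would be assembled by the server, so it is taken as a parameter here:
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
// Hypothetical helper mirroring what the server does for a "cassandra" catalog.
final class PluginSketch {
    static ConnectorFactory newCassandraFactory(final ConnectorContext connectorContext) {
        return new CassandraConnectorPlugin().create(connectorContext);
    }
}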
| 2,016 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.datastax.driver.core.Cluster;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.util.Map;
/**
* Cassandra implementation of a connector factory.
*
* @author tgianos
* @since 1.0.0
*/
class CassandraConnectorFactory extends DefaultConnectorFactory {
/**
* Constructor.
*
* @param name The catalog name
* @param catalogShardName The catalog shard name
* @param configuration The catalog configuration
*/
CassandraConnectorFactory(
@Nonnull @NonNull final String name,
@Nonnull @NonNull final String catalogShardName,
@Nonnull @NonNull final Map<String, String> configuration
) {
super(name, catalogShardName,
Lists.newArrayList(new CassandraConnectorModule(catalogShardName, configuration)));
}
/**
* {@inheritDoc}
*/
@Override
public void stop() {
super.stop();
// Stop the cassandra cluster
final Cluster cluster = this.getInjector().getInstance(Cluster.class);
if (cluster != null) {
cluster.close();
}
}
}
| 2,017 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes for connecting and getting metadata for Cassandra schemas and tables.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.cassandra;
| 2,018 |
0 |
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-cassandra/src/main/java/com/netflix/metacat/connector/cassandra/CassandraTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.cassandra;
import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.type.ArrayType;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.MapType;
import com.netflix.metacat.common.type.RowType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Data type converter for Cassandra.
*
* @see <a href="http://cassandra.apache.org/doc/latest/cql/types.html">Cassandra Data Types</a>
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class CassandraTypeConverter implements ConnectorTypeConverter {
private static final Pattern TYPE_PATTERN = Pattern.compile("^\\s*?(\\w*)\\s*?(?:<\\s*?(.*)\\s*?>)?\\s*?$");
private static final int TYPE_GROUP = 1;
private static final int PARAM_GROUP = 2;
private static final Pattern MAP_PARAM_PATTERN = Pattern
.compile("^\\s*?((?:frozen\\s*?)?\\w*\\s*?(?:<.*>)?)\\s*?,\\s*?((?:frozen\\s*?)?\\w*\\s*?(?:<.*>)?)\\s*?$");
private static final int MAP_KEY_GROUP = 1;
private static final int MAP_VALUE_GROUP = 2;
private static final Pattern TUPLE_PARAM_PATTERN
= Pattern.compile("(?:(\\w[\\w\\s]+(?:<[\\w+,\\s]+>\\s*?)?),?\\s*?)");
private static final int TUPLE_GROUP = 1;
/**
* {@inheritDoc}
*/
@Override
public Type toMetacatType(@Nonnull @NonNull final String type) {
final Matcher matcher = TYPE_PATTERN.matcher(type.toLowerCase());
// TODO: Escape case from recursion may be needed to avoid potential infinite recursion
if (matcher.matches()) {
final String cqlType = matcher.group(TYPE_GROUP);
switch (cqlType) {
case "ascii":
return BaseType.STRING;
case "bigint":
return BaseType.BIGINT;
case "blob":
return VarbinaryType.createVarbinaryType(Integer.MAX_VALUE);
case "boolean":
return BaseType.BOOLEAN;
case "counter":
return BaseType.BIGINT;
case "date":
return BaseType.DATE;
case "decimal":
return DecimalType.createDecimalType();
case "double":
return BaseType.DOUBLE;
case "float":
return BaseType.FLOAT;
case "frozen":
return this.toMetacatType(matcher.group(PARAM_GROUP));
case "int":
return BaseType.INT;
case "list":
// A null PARAM_GROUP is handled by the recursive call, which throws an exception for a null argument
return new ArrayType(this.toMetacatType(matcher.group(PARAM_GROUP)));
case "map":
final Matcher mapMatcher = MAP_PARAM_PATTERN.matcher(matcher.group(PARAM_GROUP));
if (mapMatcher.matches()) {
return new MapType(
this.toMetacatType(mapMatcher.group(MAP_KEY_GROUP)),
this.toMetacatType(mapMatcher.group(MAP_VALUE_GROUP))
);
} else {
throw new IllegalArgumentException("Unable to parse map params " + matcher.group(PARAM_GROUP));
}
case "smallint":
return BaseType.SMALLINT;
case "text":
return BaseType.STRING;
case "time":
return BaseType.TIME;
case "timestamp":
return BaseType.TIMESTAMP;
case "tinyint":
return BaseType.TINYINT;
case "tuple":
if (matcher.group(PARAM_GROUP) == null) {
throw new IllegalArgumentException("Empty tuple param group. Unable to parse");
}
final Matcher tupleMatcher = TUPLE_PARAM_PATTERN.matcher(matcher.group(PARAM_GROUP));
final ImmutableList.Builder<RowType.RowField> tupleFields = ImmutableList.builder();
int rowFieldNumber = 0;
while (tupleMatcher.find()) {
tupleFields.add(
new RowType.RowField(
this.toMetacatType(tupleMatcher.group(TUPLE_GROUP)),
"field" + rowFieldNumber++
)
);
}
return new RowType(tupleFields.build());
case "varchar":
return BaseType.STRING;
case "varint":
return BaseType.INT;
case "inet":
case "set":
case "timeuuid":
case "uuid":
default:
log.info("Currently unsupported type {}, returning Unknown type", cqlType);
return BaseType.UNKNOWN;
}
} else {
throw new IllegalArgumentException("Unable to parse CQL type " + type);
}
}
/**
* {@inheritDoc}
*/
@Override
public String fromMetacatType(@Nonnull @NonNull final Type type) {
switch (type.getTypeSignature().getBase()) {
case ARRAY:
if (!(type instanceof ArrayType)) {
throw new IllegalArgumentException("Expected an ArrayType and got " + type.getClass());
}
final ArrayType arrayType = (ArrayType) type;
return "list<" + this.getElementTypeString(arrayType.getElementType()) + ">";
case BIGINT:
return "bigint";
case BOOLEAN:
return "boolean";
case CHAR:
// TODO: Should we make this unsupported?
return "text";
case DATE:
return "date";
case DECIMAL:
return "decimal";
case DOUBLE:
return "double";
case FLOAT:
return "float";
case INT:
return "int";
case INTERVAL_DAY_TO_SECOND:
throw new UnsupportedOperationException("Cassandra doesn't support intervals.");
case INTERVAL_YEAR_TO_MONTH:
throw new UnsupportedOperationException("Cassandra doesn't support intervals.");
case JSON:
throw new UnsupportedOperationException("Cassandra doesn't support JSON natively.");
case MAP:
if (!(type instanceof MapType)) {
throw new IllegalArgumentException("Was expecting MapType instead it is " + type.getClass());
}
final MapType mapType = (MapType) type;
final Type keyType = mapType.getKeyType();
final Type valueType = mapType.getValueType();
return "map<" + this.getElementTypeString(keyType) + ", " + this.getElementTypeString(valueType) + ">";
case ROW:
if (!(type instanceof RowType)) {
throw new IllegalArgumentException("Was expecting RowType instead it is " + type.getClass());
}
final RowType rowType = (RowType) type;
final StringBuilder tupleBuilder = new StringBuilder();
tupleBuilder.append("tuple<");
// Tuple fields don't need to be frozen
boolean putComma = false;
for (final RowType.RowField field : rowType.getFields()) {
if (putComma) {
tupleBuilder.append(", ");
} else {
putComma = true;
}
tupleBuilder.append(this.fromMetacatType(field.getType()));
}
tupleBuilder.append(">");
return tupleBuilder.toString();
case SMALLINT:
return "smallint";
case STRING:
return "text";
case TIME:
return "time";
case TIME_WITH_TIME_ZONE:
throw new UnsupportedOperationException("Cassandra doesn't support time with timezone");
case TIMESTAMP:
return "timestamp";
case TIMESTAMP_WITH_TIME_ZONE:
throw new UnsupportedOperationException("Cassandra doesn't support time with timezone");
case TINYINT:
return "tinyint";
case UNKNOWN:
throw new UnsupportedOperationException("Cassandra doesn't support an unknown type");
case VARBINARY:
return "blob";
case VARCHAR:
return "text";
default:
throw new IllegalArgumentException("Unknown type: " + type.getTypeSignature().getBase());
}
}
private String getElementTypeString(final Type elementType) {
        // Nested collections must be frozen in Cassandra
if (elementType instanceof MapType || elementType instanceof ArrayType) {
return "frozen " + this.fromMetacatType(elementType);
} else {
return this.fromMetacatType(elementType);
}
}
}
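/**
 * A minimal usage sketch, not part of the original source. It assumes the
 * surrounding converter class implements Metacat's ConnectorTypeConverter
 * interface (the class declaration sits above this excerpt) and illustrates
 * the expected round trip between CQL type strings and Metacat types.
 */
final class CqlTypeConverterUsageSketch {
    private CqlTypeConverterUsageSketch() {
    }

    static void demo(final ConnectorTypeConverter converter) {
        // "map<int, text>" parses into a MapType with INT keys and STRING values.
        final Type mapType = converter.toMetacatType("map<int, text>");
        // Converting back produces the CQL string again; nested collection
        // elements would be rendered with a "frozen " prefix by the code above.
        final String cql = converter.fromMetacatType(mapType);
        assert "map<int, text>".equals(cql);
    }
}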
| 2,019 |
0 |
Create_ds/metacat/metacat-war/src/main/java/com/netflix
|
Create_ds/metacat/metacat-war/src/main/java/com/netflix/metacat/MetacatWar.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
/**
* Servlet entry point for Spring when deployed as a WAR file.
*
* @author tgianos
* @since 1.1.0
*/
@SpringBootApplication
@ComponentScan(excludeFilters = @ComponentScan.Filter(
type = FilterType.ASPECTJ, pattern = "com.netflix.metacat.connector..*"))
public class MetacatWar extends SpringBootServletInitializer {
/**
* Constructor.
*/
public MetacatWar() {
}
/**
* Main.
*
* @param args Program arguments.
*/
public static void main(final String[] args) {
new MetacatWar()
.configure(new SpringApplicationBuilder(MetacatWar.class))
.run(args);
}
}
| 2,020 |
0 |
Create_ds/metacat/metacat-war/src/main/java/com/netflix
|
Create_ds/metacat/metacat-war/src/main/java/com/netflix/metacat/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Top level package for boot application files.
*
* @author tgianos
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,021 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector/polaris/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris connector smoke test classes.
*/
package com.netflix.metacat.connector.polaris;
| 2,022 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/functionalTest/java/com/netflix/metacat/connector/polaris/PolarisStoreConnectorFunctionalTest.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import com.netflix.metacat.connector.polaris.store.PolarisStoreConnectorTest;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
/**
* Test persistence operations on Database objects.
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {PolarisPersistenceConfig.class})
@ActiveProfiles(profiles = {"polaris_functional_test"})
@AutoConfigureDataJpa
public class PolarisStoreConnectorFunctionalTest extends PolarisStoreConnectorTest {
}
| 2,023 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/PolarisConnectorTableServiceTest.java
|
package com.netflix.metacat.connector.polaris;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.properties.DefaultConfigImpl;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteriaImpl;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpWrapper;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpsProxy;
import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import com.netflix.metacat.connector.polaris.mappers.PolarisTableMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.spectator.api.NoopRegistry;
import org.junit.Assert;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import spock.lang.Shared;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Test PolarisConnectorTableService.
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {PolarisPersistenceConfig.class})
@ActiveProfiles(profiles = {"polarisconnectortest"})
@DirtiesContext(classMode = DirtiesContext.ClassMode.BEFORE_EACH_TEST_METHOD)
@AutoConfigureDataJpa
public class PolarisConnectorTableServiceTest {
private static final String CATALOG_NAME = "catalog_name";
private static final String DB_NAME = "db_name";
private static final QualifiedName DB_QUALIFIED_NAME = QualifiedName.ofDatabase(CATALOG_NAME, DB_NAME);
@Autowired
private PolarisStoreService polarisStoreService;
@Shared
private ConnectorRequestContext requestContext = new ConnectorRequestContext();
@Shared
private ThreadServiceManager serviceManager = Mockito.mock(ThreadServiceManager.class);
@Shared
private ConnectorContext connectorContext;
@Shared
private PolarisConnectorDatabaseService polarisDBService;
@Shared
private PolarisConnectorTableService polarisTableService;
/**
* Initialization.
*/
@BeforeEach
public void init() {
connectorContext = new ConnectorContext(CATALOG_NAME, CATALOG_NAME, "polaris",
new DefaultConfigImpl(new MetacatProperties()), new NoopRegistry(), null, Maps.newHashMap());
polarisDBService = new PolarisConnectorDatabaseService(polarisStoreService, connectorContext);
polarisTableService = new PolarisConnectorTableService(
polarisStoreService,
CATALOG_NAME,
polarisDBService,
new HiveConnectorInfoConverter(new HiveTypeConverter()),
new IcebergTableHandler(connectorContext,
new IcebergTableCriteriaImpl(connectorContext),
new IcebergTableOpWrapper(connectorContext, serviceManager),
new IcebergTableOpsProxy()),
new PolarisTableMapper(CATALOG_NAME),
connectorContext);
}
/**
* Test empty list tables.
*/
@Test
public void testListTablesEmpty() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "");
final List<QualifiedName> names = polarisTableService.listNames(
requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(2, 0));
Assert.assertEquals(names, Arrays.asList());
}
/**
* Test table exists.
*/
@Test
public void testTableExists() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final TableInfo tableInfo = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1"))
.build();
boolean exists = polarisTableService.exists(requestContext, qualifiedName);
Assert.assertFalse(exists);
polarisTableService.create(requestContext, tableInfo);
exists = polarisTableService.exists(requestContext, qualifiedName);
Assert.assertTrue(exists);
}
/**
* Test table list.
*/
@Test
public void testList() {
final QualifiedName name1 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final TableInfo tableInfo1 = TableInfo.builder()
.name(name1)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1"))
.build();
polarisTableService.create(requestContext, tableInfo1);
final QualifiedName name2 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table2");
final TableInfo tableInfo2 = TableInfo.builder()
.name(name2)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc2"))
.build();
polarisTableService.create(requestContext, tableInfo2);
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "");
final List<TableInfo> tables = polarisTableService.list(
requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(2, 0));
Assert.assertEquals(tables.size(), 2);
Assert.assertEquals(tables.stream().map(TableInfo::getName).collect(Collectors.toSet()),
ImmutableSet.of(name1, name2));
}
/**
* Test table creation then list tables.
*/
@Test
public void testTableCreationAndList() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final TableInfo tableInfo = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1"))
.build();
polarisTableService.create(requestContext, tableInfo);
final List<QualifiedName> names = polarisTableService.listNames(
requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(2, 0));
Assert.assertEquals(names, Arrays.asList(qualifiedName));
}
/**
* Test multiple table creation then list tables.
*/
@Test
public void testMultipleTableCreationAndList() {
final List<QualifiedName> createdTables = new ArrayList<>();
for (int i = 0; i < 10; i++) {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table" + i);
final TableInfo tableInfo = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc" + i))
.build();
polarisTableService.create(requestContext, tableInfo);
createdTables.add(qualifiedName);
}
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "");
final List<QualifiedName> names = polarisTableService.listNames(
requestContext, DB_QUALIFIED_NAME, qualifiedName, new Sort(null, SortOrder.ASC), new Pageable(20, 0));
Assert.assertEquals(names, createdTables);
}
/**
* Test table rename.
*/
@Test
public void testTableRename() {
final QualifiedName nameOld = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final QualifiedName nameNew = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table2");
final TableInfo tableInfo = TableInfo.builder()
.name(nameOld)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1"))
.build();
polarisTableService.create(requestContext, tableInfo);
boolean existsOld = polarisTableService.exists(requestContext, nameOld);
Assert.assertTrue(existsOld);
boolean existsNew = polarisTableService.exists(requestContext, nameNew);
Assert.assertFalse(existsNew);
polarisTableService.rename(requestContext, nameOld, nameNew);
existsOld = polarisTableService.exists(requestContext, nameOld);
Assert.assertFalse(existsOld);
existsNew = polarisTableService.exists(requestContext, nameNew);
Assert.assertTrue(existsNew);
}
/**
* Test delete table.
*/
@Test
public void testDeleteTable() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table");
final TableInfo tableInfo = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1"))
.build();
polarisTableService.create(requestContext, tableInfo);
boolean exists = polarisTableService.exists(requestContext, qualifiedName);
Assert.assertTrue(exists);
polarisTableService.delete(requestContext, qualifiedName);
exists = polarisTableService.exists(requestContext, qualifiedName);
Assert.assertFalse(exists);
}
/**
* Test get table names.
*/
@Test
public void testGetTableNames() {
final QualifiedName name1 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final TableInfo tableInfo1 = TableInfo.builder()
.name(name1)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc1"))
.build();
polarisTableService.create(requestContext, tableInfo1);
final QualifiedName name2 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table2");
final TableInfo tableInfo2 = TableInfo.builder()
.name(name2)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc2"))
.build();
polarisTableService.create(requestContext, tableInfo2);
final QualifiedName name3 = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table3");
final TableInfo tableInfo3 = TableInfo.builder()
.name(name3)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", "loc3"))
.build();
polarisTableService.create(requestContext, tableInfo3);
final List<QualifiedName> tables = polarisTableService.getTableNames(requestContext, DB_QUALIFIED_NAME, "", -1);
Assert.assertEquals(tables.size(), 3);
Assert.assertEquals(tables, ImmutableList.of(name1, name2, name3));
}
/**
* Test get table using metadata json resource file.
*/
@Test
public void testGetTable() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final String location = "src/test/resources/metadata/00001-abf48887-aa4f-4bcc-9219-1e1721314ee1.metadata.json";
final TableInfo tableInfo = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", location))
.build();
polarisTableService.create(requestContext, tableInfo);
final TableInfo tableResult = polarisTableService.get(requestContext, qualifiedName);
// check schema info correctly parsed from iceberg metadata file
final List<FieldInfo> fields = tableResult.getFields();
Assert.assertEquals(fields.size(), 3);
Assert.assertEquals(fields.get(0).getName(), "id");
Assert.assertEquals(fields.get(0).getComment(), "1st field");
Assert.assertEquals(fields.get(0).getSourceType(), "long");
Assert.assertEquals(fields.get(1).getName(), "data");
Assert.assertEquals(fields.get(1).getComment(), "2nd field");
Assert.assertEquals(fields.get(1).getSourceType(), "string");
Assert.assertEquals(fields.get(2).getName(), "dateint");
Assert.assertEquals(fields.get(2).getComment(), "3rd field");
Assert.assertEquals(fields.get(2).getSourceType(), "int");
}
/**
* Test table serde fields.
*/
@Test
public void testTableSerde() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final String location = "src/test/resources/metadata/00000-9b5d4c36-130c-4288-9599-7d850c203d11.metadata.json";
final TableInfo tableInfo = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("table_type", "ICEBERG", "metadata_location", location))
.build();
polarisTableService.create(requestContext, tableInfo);
final TableInfo tableResult = polarisTableService.get(requestContext, qualifiedName);
// check serde info
Assert.assertNotNull(tableResult.getSerde());
Assert.assertEquals(tableResult.getSerde().getUri(), "src/test/resources");
Assert.assertEquals(tableResult.getSerde().getInputFormat(), "org.apache.hadoop.mapred.FileInputFormat");
Assert.assertEquals(tableResult.getSerde().getOutputFormat(), "org.apache.hadoop.mapred.FileOutputFormat");
Assert.assertEquals(tableResult.getSerde().getSerializationLib(),
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
}
/**
* Test update table reject cases.
*/
@Test
public void testUpdateTableReject() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final String location0 = "src/test/resources/metadata/00000-9b5d4c36-130c-4288-9599-7d850c203d11.metadata.json";
final String location1 = "src/test/resources/metadata/00001-abf48887-aa4f-4bcc-9219-1e1721314ee1.metadata.json";
final String location2 = "src/test/resources/metadata/00002-2d6c1951-31d5-4bea-8edd-e35746b172f3.metadata.json";
final Map<String, String> metadata = new HashMap<>();
metadata.put("metadata_location", location0);
final TableInfo tableInfo0 = TableInfo.builder().name(qualifiedName).metadata(metadata).build();
polarisTableService.create(requestContext, tableInfo0);
final TableInfo tableResult0 = polarisTableService.get(requestContext, qualifiedName);
Assert.assertEquals(tableResult0.getMetadata().get("metadata_location"), location0);
// check update location without setting location fails
metadata.put("previous_metadata_location", location1);
metadata.remove("metadata_location");
final TableInfo tableInfo1 = TableInfo.builder().name(qualifiedName).metadata(metadata).build();
Assertions.assertThrows(InvalidMetaException.class,
() -> polarisTableService.update(requestContext, tableInfo1));
// check update location to new location equals blank fails
metadata.put("previous_metadata_location", location0);
metadata.put("metadata_location", "");
final TableInfo tableInfo2 = TableInfo.builder().name(qualifiedName).metadata(metadata).build();
Assertions.assertThrows(InvalidMetaException.class,
() -> polarisTableService.update(requestContext, tableInfo2));
// check update location existing and previous location do not match fails
metadata.put("previous_metadata_location", location1);
metadata.put("metadata_location", location2);
final TableInfo tableInfo3 = TableInfo.builder().name(qualifiedName).metadata(metadata).build();
Assertions.assertThrows(TablePreconditionFailedException.class,
() -> polarisTableService.update(requestContext, tableInfo3));
}
/**
* Test update table using metadata json resource file.
*/
@Test
public void testUpdateTableAccept() {
final QualifiedName qualifiedName = QualifiedName.ofTable(CATALOG_NAME, DB_NAME, "table1");
final String location0 = "src/test/resources/metadata/00000-9b5d4c36-130c-4288-9599-7d850c203d11.metadata.json";
final TableInfo tableInfo0 = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("metadata_location", location0))
.build();
polarisTableService.create(requestContext, tableInfo0);
final TableInfo tableResult0 = polarisTableService.get(requestContext, qualifiedName);
Assert.assertEquals(tableResult0.getMetadata().get("metadata_location"), location0);
final String location1 = "src/test/resources/metadata/00001-abf48887-aa4f-4bcc-9219-1e1721314ee1.metadata.json";
final TableInfo tableInfo1 = TableInfo.builder()
.name(qualifiedName)
.metadata(ImmutableMap.of("previous_metadata_location", location0, "metadata_location", location1))
.build();
polarisTableService.update(requestContext, tableInfo1);
final TableInfo tableResult1 = polarisTableService.get(requestContext, qualifiedName);
Assert.assertEquals(tableResult1.getMetadata().get("metadata_location"), location1);
}
}
| 2,024 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/PolarisConnectorDatabaseServiceTest.java
|
package com.netflix.metacat.connector.polaris;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.properties.DefaultConfigImpl;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.spectator.api.NoopRegistry;
import org.junit.Assert;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import spock.lang.Shared;
import java.util.List;
/**
* Test PolarisConnectorTableService.
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {PolarisPersistenceConfig.class})
@ActiveProfiles(profiles = {"polarisconnectortest"})
@DirtiesContext(classMode = DirtiesContext.ClassMode.BEFORE_EACH_TEST_METHOD)
@AutoConfigureDataJpa
public class PolarisConnectorDatabaseServiceTest {
private static final String CATALOG_NAME = "catalog_name";
private static final String DB1_NAME = "db1_name";
private static final String DB2_NAME = "db2_name";
private static final QualifiedName DB1_QUALIFIED_NAME = QualifiedName.ofDatabase(CATALOG_NAME, DB1_NAME);
private static final QualifiedName DB2_QUALIFIED_NAME = QualifiedName.ofDatabase(CATALOG_NAME, DB2_NAME);
@Autowired
private PolarisStoreService polarisStoreService;
@Shared
private ConnectorContext connectorContext;
@Shared
private ConnectorRequestContext requestContext = new ConnectorRequestContext();
@Shared
private PolarisConnectorDatabaseService polarisDBService;
/**
* Initialization.
*/
@BeforeEach
public void init() {
connectorContext = new ConnectorContext(CATALOG_NAME, CATALOG_NAME, "polaris",
new DefaultConfigImpl(new MetacatProperties()), new NoopRegistry(), null, Maps.newHashMap());
polarisDBService = new PolarisConnectorDatabaseService(polarisStoreService, connectorContext);
}
/**
* Test create database.
*/
@Test
public void testCreateDb() {
final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build();
polarisDBService.create(requestContext, info);
Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME));
}
/**
* Test get database that exists.
*/
@Test
public void testGetDb() {
final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).uri("uri").build();
polarisDBService.create(requestContext, info);
final DatabaseInfo result = polarisDBService.get(requestContext, DB1_QUALIFIED_NAME);
Assert.assertEquals(info, result);
}
/**
* Test get database not found.
*/
@Test
public void testGetDbNotFound() {
Assertions.assertThrows(DatabaseNotFoundException.class,
() -> polarisDBService.get(requestContext, DB1_QUALIFIED_NAME));
}
/**
* Test create database that already exists.
*/
@Test
public void testCreateDbAlreadyExists() {
final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build();
polarisDBService.create(requestContext, info);
Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME));
Assertions.assertThrows(DatabaseAlreadyExistsException.class,
() -> polarisDBService.create(requestContext, info));
}
/**
* Test create database with no uri set should fallback to default uri.
*/
@Test
public void testCreateDbDefaultUri() {
final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build();
polarisDBService.create(requestContext, info);
final DatabaseInfo infoExpected = DatabaseInfo.builder()
.name(DB1_QUALIFIED_NAME).uri("db1_name.db").build();
final DatabaseInfo result = polarisDBService.get(requestContext, DB1_QUALIFIED_NAME);
Assert.assertEquals(infoExpected, result);
}
/**
* Test update database.
*/
@Test
public void testUpdateDb() {
final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).uri("uri").build();
polarisDBService.create(requestContext, info);
Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME));
polarisDBService.update(requestContext, info);
final DatabaseInfo result = polarisDBService.get(requestContext, DB1_QUALIFIED_NAME);
Assert.assertEquals(info, result);
}
/**
* Test delete database.
*/
@Test
public void testDeleteDb() {
final DatabaseInfo info = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).build();
polarisDBService.create(requestContext, info);
Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME));
polarisDBService.delete(requestContext, DB1_QUALIFIED_NAME);
Assert.assertFalse(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME));
}
/**
* Test list databases.
*/
@Test
public void testListDb() {
final DatabaseInfo db1 = DatabaseInfo.builder().name(DB1_QUALIFIED_NAME).uri("uri1").build();
final DatabaseInfo db2 = DatabaseInfo.builder().name(DB2_QUALIFIED_NAME).uri("uri2").build();
polarisDBService.create(requestContext, db1);
polarisDBService.create(requestContext, db2);
Assert.assertTrue(polarisDBService.exists(requestContext, DB1_QUALIFIED_NAME));
Assert.assertTrue(polarisDBService.exists(requestContext, DB2_QUALIFIED_NAME));
final List<QualifiedName> dbNames =
polarisDBService.listNames(requestContext, QualifiedName.ofCatalog(CATALOG_NAME), null, null, null);
Assert.assertEquals(Sets.newHashSet(dbNames), Sets.newHashSet(DB1_QUALIFIED_NAME, DB2_QUALIFIED_NAME));
final List<DatabaseInfo> dbs =
polarisDBService.list(requestContext, QualifiedName.ofCatalog(CATALOG_NAME), null, null, null);
Assert.assertEquals(Sets.newHashSet(dbs), Sets.newHashSet(db1, db2));
}
}
| 2,025 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/PolarisConnectorPartitionServiceTest.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.same;
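/**
 * Test PolarisConnectorPartitionService.
 */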
public class PolarisConnectorPartitionServiceTest {
private QualifiedName tableName;
private QualifiedName partitionName;
private QualifiedName partitionName2;
private TableInfo tableInfo;
private PartitionListRequest partitionsListRequest;
private PartitionsSaveRequest partitionsSaveRequest;
private List<String> partitionNames;
private List<String> uris;
private PartitionInfo partitionInfo;
private PartitionInfo partitionInfo2;
private Sort sort;
@Mock
private ConnectorRequestContext requestContext;
@Mock
private ConnectorContext connectorContext;
@Mock
private IcebergTableHandler icebergTableHandler;
@Mock
private PolarisConnectorTableService tableService;
private PolarisConnectorPartitionService polaris;
@Before
public void setup() {
MockitoAnnotations.initMocks(this);
tableName = QualifiedName.fromString("catalog/db/table");
partitionName = QualifiedName.fromString("catalog/db/table/dateint=20230101");
partitionName2 = QualifiedName.fromString("catalog/db/table/dateint=20230102");
tableInfo = TableInfo.builder().name(tableName).build();
partitionsListRequest = new PartitionListRequest();
partitionsSaveRequest = new PartitionsSaveRequest();
partitionNames = Arrays.asList("p1", "p2");
uris = Arrays.asList("u1", "u2");
partitionInfo = new PartitionInfo();
partitionInfo2 = new PartitionInfo();
sort = new Sort();
polaris = new PolarisConnectorPartitionService(connectorContext, icebergTableHandler, tableService);
}
@Test
public void getPartitions() {
partitionsListRequest.setFilter("filter");
partitionsListRequest.setPartitionNames(Collections.singletonList("dateint=20230101"));
partitionsListRequest.setPageable(new Pageable(1, 0));
partitionsListRequest.setSort(sort);
doReturn(Arrays.asList(partitionInfo, partitionInfo2))
.when(icebergTableHandler).getPartitions(
same(tableInfo),
same(connectorContext),
eq("filter"),
eq(Collections.singletonList("dateint=20230101")),
same(sort));
final List<PartitionInfo> partitions
= polaris.getPartitions(requestContext, tableName, partitionsListRequest, tableInfo);
assertThat(partitions).isEqualTo(Collections.singletonList(partitionInfo));
}
@Test
public void getPartitionKeys() {
partitionInfo.setName(partitionName);
partitionInfo2.setName(partitionName2);
partitionsListRequest.setFilter("filter");
partitionsListRequest.setPartitionNames(Collections.singletonList("dateint=20230101"));
partitionsListRequest.setPageable(new Pageable(2, 0));
partitionsListRequest.setSort(sort);
doReturn(Arrays.asList(partitionInfo, partitionInfo2))
.when(icebergTableHandler).getPartitions(
same(tableInfo),
same(connectorContext),
eq("filter"),
eq(Collections.singletonList("dateint=20230101")),
same(sort));
final List<String> partitionKeys
= polaris.getPartitionKeys(requestContext, tableName, partitionsListRequest, tableInfo);
assertThat(partitionKeys).isEqualTo(Arrays.asList("dateint=20230101", "dateint=20230102"));
}
@Test
public void get() {
partitionInfo.setName(partitionName);
partitionInfo2.setName(partitionName2);
doReturn(tableInfo).when(tableService)
.get(requestContext, QualifiedName.ofTable("catalog", "db", "table"));
doReturn(Arrays.asList(partitionInfo, partitionInfo2))
.when(icebergTableHandler).getPartitions(
same(tableInfo),
same(connectorContext),
eq(null),
eq(Collections.singletonList("dateint=20230101")),
eq(null));
final PartitionInfo partition = polaris.get(requestContext, partitionName);
assertThat(partition).isSameAs(partitionInfo);
}
@Test
public void getPartitionNames() {
assertThatThrownBy(() -> polaris.getPartitionNames(requestContext, uris, true))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
public void getPartitionUris() {
partitionInfo.setName(partitionName);
partitionInfo2.setName(partitionName2);
partitionInfo.setSerde(StorageInfo.builder().uri("uri1").build());
partitionInfo2.setSerde(StorageInfo.builder().uri("uri2").build());
doReturn(Arrays.asList(partitionInfo, partitionInfo2))
.when(icebergTableHandler).getPartitions(
same(tableInfo),
same(connectorContext),
eq("filter"),
eq(Collections.singletonList("dateint=20230101")),
same(sort));
partitionsListRequest.setFilter("filter");
partitionsListRequest.setPartitionNames(Collections.singletonList("dateint=20230101"));
partitionsListRequest.setPageable(new Pageable(1, 1));
partitionsListRequest.setSort(sort);
final List<String> partitionUris
= polaris.getPartitionUris(requestContext, tableName, partitionsListRequest, tableInfo);
assertThat(partitionUris).isEqualTo(Collections.singletonList("uri2"));
}
@Test
public void getPartitionCount() {
doReturn(Arrays.asList(partitionInfo, partitionInfo2))
.when(icebergTableHandler).getPartitions(
same(tableInfo),
same(connectorContext),
eq(null),
eq(null),
eq(null));
assertThat(polaris.getPartitionCount(requestContext, tableName, tableInfo)).isEqualTo(2);
}
@Test
public void create() {
assertThatThrownBy(() -> polaris.create(requestContext, partitionInfo))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
public void update() {
assertThatThrownBy(() -> polaris.update(requestContext, partitionInfo))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
public void delete() {
assertThatThrownBy(() -> polaris.delete(requestContext, partitionName))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
public void savePartitions() {
assertThatThrownBy(() -> polaris.savePartitions(requestContext, tableName, partitionsSaveRequest))
.isInstanceOf(UnsupportedOperationException.class);
}
@Test
public void deletePartitions() {
assertThatThrownBy(() -> polaris.deletePartitions(requestContext, tableName, partitionNames, tableInfo))
.isInstanceOf(UnsupportedOperationException.class);
}
}
| 2,026 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris test classes.
*/
package com.netflix.metacat.connector.polaris;
| 2,027 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/store/PolarisStoreConnectorTest.java
|
package com.netflix.metacat.connector.polaris.store;
import com.netflix.metacat.connector.polaris.common.PolarisUtils;
import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import com.netflix.metacat.connector.polaris.store.repos.PolarisDatabaseRepository;
import com.netflix.metacat.connector.polaris.store.repos.PolarisTableRepository;
import org.junit.Assert;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.AutoConfigureDataJpa;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.OptimisticLockingFailureException;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import java.util.List;
import java.util.Optional;
import java.util.Random;
/**
* Test persistence operations on Database objects.
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(classes = {PolarisPersistenceConfig.class})
@ActiveProfiles(profiles = {"polaristest"})
@AutoConfigureDataJpa
public class PolarisStoreConnectorTest {
private static final String DB_NAME_FOO = "foo";
private static final String TBL_NAME_BAR = "bar";
private static final String DEFAULT_METACAT_USER = "metacat_user";
private static Random random = new Random(System.currentTimeMillis());
@Autowired
private PolarisDatabaseRepository repo;
@Autowired
private PolarisTableRepository tblRepo;
@Autowired
private PolarisStoreConnector polarisConnector;
private static String generateDatabaseName() {
return DB_NAME_FOO + "_" + random.nextLong();
}
private static String generateTableName() {
return TBL_NAME_BAR + "_" + random.nextLong();
}
private PolarisDatabaseEntity createDB(final String dbName) {
final String location = "file://temp";
        final PolarisDatabaseEntity entity = polarisConnector.createDatabase(dbName, location, DEFAULT_METACAT_USER);
// assert that database exists, post-creation.
Assert.assertTrue(polarisConnector.databaseExistsById(entity.getDbId()));
Assert.assertTrue(polarisConnector.databaseExists(dbName));
Assert.assertEquals(0L, entity.getVersion().longValue());
Assert.assertTrue(entity.getDbId().length() > 0);
Assert.assertEquals(dbName, entity.getDbName());
Assert.assertEquals(location, entity.getLocation());
Assert.assertEquals(DEFAULT_METACAT_USER, entity.getAudit().getCreatedBy());
final Optional<PolarisDatabaseEntity> fetchedEntity = polarisConnector.getDatabase(dbName);
Assert.assertTrue(fetchedEntity.isPresent());
Assert.assertEquals(entity, fetchedEntity.get());
return entity;
}
private PolarisTableEntity createTable(final String dbName, final String tblName) {
final PolarisTableEntity entity = polarisConnector.createTable(dbName, tblName,
"loc", PolarisUtils.DEFAULT_METACAT_USER);
Assert.assertTrue(polarisConnector.tableExistsById(entity.getTblId()));
Assert.assertTrue(polarisConnector.tableExists(dbName, tblName));
Assert.assertTrue(entity.getTblId().length() > 0);
Assert.assertTrue(entity.getVersion() >= 0);
Assert.assertEquals(dbName, entity.getDbName());
Assert.assertEquals(tblName, entity.getTblName());
final Optional<PolarisTableEntity> fetchedEntity = polarisConnector.getTable(dbName, tblName);
Assert.assertTrue(fetchedEntity.isPresent());
Assert.assertEquals(entity, fetchedEntity.get());
return entity;
}
/**
* Test Database object creation and persistence.
*/
@Test
public void testCreateDB() {
        createDB(generateDatabaseName());
}
/**
* Test that a table cannot be created if database is absent.
*/
@Test
public void testTableCreationFailIfDatabaseIsAbsent() {
Assertions.assertThrows(DataAccessException.class, () ->
polarisConnector.createTable(generateDatabaseName(), generateTableName(),
"loc", PolarisUtils.DEFAULT_METACAT_USER));
}
/**
* Test table creation if database exists.
* Verify table deletion
*/
@Test
public void testTableCreationAndDeletion() {
final String dbName = generateDatabaseName();
final String tblName = generateTableName();
        createDB(dbName);
final PolarisTableEntity tblEntity = createTable(dbName, tblName);
polarisConnector.deleteTable(dbName, tblName);
Assert.assertFalse(polarisConnector.tableExistsById(tblEntity.getTblId()));
}
/**
* Test to verify that table names fetch works.
*/
@Test
public void testPaginatedFetch() {
final String dbName = generateDatabaseName();
        createDB(dbName);
List<String> tblNames = polarisConnector.getTables(dbName, "");
Assert.assertEquals(0, tblNames.size());
final String tblNameA = "A_" + generateTableName();
final String tblNameB = "B_" + generateTableName();
final String tblNameC = "C_" + generateTableName();
createTable(dbName, tblNameA);
createTable(dbName, tblNameB);
createTable(dbName, tblNameC);
tblNames = polarisConnector.getTables(dbName, "");
Assert.assertEquals(3, tblNames.size());
Assert.assertEquals(tblNameA, tblNames.get(0));
Assert.assertEquals(tblNameB, tblNames.get(1));
Assert.assertEquals(tblNameC, tblNames.get(2));
}
/**
* Test to verify that table name can be updated.
*/
@Test
public void testTableUpdate() {
// Create Table Entity in DB
final String dbName = generateDatabaseName();
final String tblName = generateTableName();
        createDB(dbName);
final PolarisTableEntity tblEntity = createTable(dbName, tblName);
// Update table name
final String newTblName = generateTableName();
tblEntity.setTblName(newTblName);
final PolarisTableEntity updatedTblEntity = polarisConnector.saveTable(tblEntity);
Assert.assertEquals(newTblName, updatedTblEntity.getTblName());
}
/**
* Test to validate that the table can be created via a PolarisTableEntity parameter.
* Also tests that metadata_location is getting stored.
*/
@Test
public void createTableWithSaveApi() {
final String dbName = generateDatabaseName();
createDB(dbName);
final String tblName = generateTableName();
final String metadataLocation = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/foo";
final PolarisTableEntity e = new PolarisTableEntity(dbName, tblName, "metacatuser");
e.setMetadataLocation(metadataLocation);
final PolarisTableEntity savedEntity = polarisConnector.saveTable(e);
Assert.assertEquals(metadataLocation, savedEntity.getMetadataLocation());
}
/**
* Test to verify that compare-and-swap update of the metadata location works as expected.
*/
@Test
public void updateMetadataLocation() {
final String dbName = generateDatabaseName();
createDB(dbName);
final String tblName = generateTableName();
final String metadataLocation = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/foo";
final PolarisTableEntity e = new PolarisTableEntity(dbName, tblName, "metacatuser");
e.setMetadataLocation(metadataLocation);
final PolarisTableEntity savedEntity = polarisConnector.saveTable(e);
final String newLocation = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/bar";
// update should fail since the expected location is not going to match.
boolean updatedSuccess = polarisConnector.updateTableMetadataLocation(
dbName, tblName, "unexpected_location",
newLocation, PolarisUtils.DEFAULT_METACAT_USER);
Assert.assertFalse(updatedSuccess);
// successful update should happen.
updatedSuccess = polarisConnector.updateTableMetadataLocation(dbName, tblName, metadataLocation,
newLocation, "new_user");
Assert.assertTrue(updatedSuccess);
        final PolarisTableEntity updatedEntity = polarisConnector
            .getTable(dbName, tblName).orElseThrow(() -> new RuntimeException("Expected to find saved entity"));
Assert.assertEquals(updatedEntity.getPreviousMetadataLocation(), metadataLocation);
// after the successful update, the same call should fail, since the current metadataLocation has changed.
updatedSuccess = polarisConnector.updateTableMetadataLocation(dbName, tblName, metadataLocation,
newLocation, PolarisUtils.DEFAULT_METACAT_USER);
Assert.assertFalse(updatedSuccess);
}
/**
* Test updateLocation(...) while save(...) is called in interleaved fashion.
*/
@Test
public void updateMetadataLocationWithInterleavedSave() {
final String dbName = generateDatabaseName();
createDB(dbName);
final String tblName = generateTableName();
final String location0 = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/location0";
final PolarisTableEntity e = new PolarisTableEntity(dbName, tblName, "metacatuser");
e.setMetadataLocation(location0);
final PolarisTableEntity savedEntity = polarisConnector.saveTable(e);
final String location1 = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/location1";
// update the metadata location.
final boolean updatedSuccess =
polarisConnector.updateTableMetadataLocation(dbName, tblName, location0, location1, "new_user");
Assert.assertTrue(updatedSuccess);
final String location2 = "s3/s3n://dataoven-prod/hive/dataoven_prod/warehouse/location2";
// At this point, savedEntity is stale, and any updates to savedEntity should not be allowed
// to persist.
savedEntity.setMetadataLocation(location2);
Assertions.assertThrows(OptimisticLockingFailureException.class, () -> {
polarisConnector.saveTable(savedEntity);
});
}
}
| 2,028 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/test/java/com/netflix/metacat/connector/polaris/store/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris connector test classes.
*/
package com.netflix.metacat.connector.polaris.store;
| 2,029 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorPartitionService.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
/**
* Partition service for Iceberg tables in Polaris.
*
 * Currently supports only the read-only methods; getPartitionNames is the exception and is unsupported.
*/
@RequiredArgsConstructor
public class PolarisConnectorPartitionService implements ConnectorPartitionService {
private final ConnectorContext context;
private final IcebergTableHandler icebergTableHandler;
private final PolarisConnectorTableService tableService;
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(@NonNull final ConnectorRequestContext requestContext,
@NonNull final QualifiedName tableName,
@NonNull final PartitionListRequest partitionsRequest,
@NonNull final TableInfo tableInfo) {
return ConnectorUtils.paginate(
icebergTableHandler.getPartitions(
tableInfo,
context,
partitionsRequest.getFilter(),
partitionsRequest.getPartitionNames(),
partitionsRequest.getSort()
),
partitionsRequest.getPageable()
);
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(@NonNull final ConnectorRequestContext requestContext,
@NonNull final QualifiedName tableName,
@NonNull final PartitionListRequest partitionsRequest,
@NonNull final TableInfo tableInfo) {
return getPartitions(requestContext, tableName, partitionsRequest, tableInfo).stream()
.map(info -> info.getName().getPartitionName())
.collect(Collectors.toList());
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(@NonNull final ConnectorRequestContext requestContext,
@NonNull final QualifiedName table,
@NonNull final TableInfo tableInfo) {
return icebergTableHandler.getPartitions(
tableInfo,
context,
            null, // filter expression
null, // partition ids
null // sort
).size();
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionUris(@NonNull final ConnectorRequestContext requestContext,
@NonNull final QualifiedName table,
@NonNull final PartitionListRequest partitionsRequest,
@NonNull final TableInfo tableInfo) {
return getPartitions(requestContext, table, partitionsRequest, tableInfo).stream()
.map(partitionInfo -> partitionInfo.getSerde().getUri())
.collect(Collectors.toList());
}
/**
* {@inheritDoc}.
*/
@Override
public PartitionInfo get(@NonNull final ConnectorRequestContext requestContext,
@NonNull final QualifiedName partitionName) {
final QualifiedName tableName = QualifiedName.ofTable(
partitionName.getCatalogName(),
partitionName.getDatabaseName(),
partitionName.getTableName()
);
final TableInfo tableInfo = tableService.get(requestContext, tableName);
final List<PartitionInfo> partitions = icebergTableHandler.getPartitions(
tableInfo,
context,
null,
Collections.singletonList(partitionName.getPartitionName()),
null
);
return partitions.stream()
.filter(partitionInfo -> partitionInfo.getName().equals(partitionName))
.findFirst()
.orElseThrow(() -> new PartitionNotFoundException(tableName, partitionName.getPartitionName()));
}
}
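/**
 * A minimal usage sketch, not part of the original source. It assumes an
 * already-constructed service instance and shows how a caller would fetch
 * the partition keys of one known partition of an Iceberg table.
 */
final class PolarisPartitionServiceUsageSketch {
    private PolarisPartitionServiceUsageSketch() {
    }

    static List<String> demo(final PolarisConnectorPartitionService service,
                             final ConnectorRequestContext requestContext,
                             final TableInfo tableInfo) {
        final PartitionListRequest request = new PartitionListRequest();
        // Restrict the listing to a single partition id; the filter, sort and
        // pageable fields are optional and left unset here.
        request.setPartitionNames(Collections.singletonList("dateint=20230101"));
        return service.getPartitionKeys(requestContext, tableInfo.getName(), request, tableInfo);
    }
}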
| 2,030 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorDatabaseService.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.polaris.common.PolarisUtils;
import com.netflix.metacat.connector.polaris.mappers.PolarisDatabaseMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.DataIntegrityViolationException;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Database service for the Polaris connector.
*/
@Slf4j
public class PolarisConnectorDatabaseService implements ConnectorDatabaseService {
private static final String DEFAULT_LOCATION_SUFFIX = ".db";
private static final String DB_DEFAULT_LOCATION = "polaris.db-default-location";
private final String defaultLocationPrefix;
private final PolarisStoreService polarisStoreService;
/**
* Constructor.
*
* @param polarisStoreService polaris connector
* @param connectorContext connector context
*/
public PolarisConnectorDatabaseService(
final PolarisStoreService polarisStoreService,
final ConnectorContext connectorContext
) {
this.polarisStoreService = polarisStoreService;
this.defaultLocationPrefix = connectorContext.getConfiguration().getOrDefault(DB_DEFAULT_LOCATION, "");
}
/**
* {@inheritDoc}.
*/
@Override
public void create(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
final QualifiedName name = databaseInfo.getName();
final String createdBy = PolarisUtils.getUserOrDefault(context);
// check exists then create in non-transactional optimistic manner
if (exists(context, name)) {
throw new DatabaseAlreadyExistsException(name);
}
try {
final String location = databaseInfo.getUri() == null
? this.defaultLocationPrefix + name.getDatabaseName() + DEFAULT_LOCATION_SUFFIX : databaseInfo.getUri();
this.polarisStoreService.createDatabase(name.getDatabaseName(), location, createdBy);
} catch (DataIntegrityViolationException exception) {
throw new InvalidMetaException(name, exception);
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed creating polaris database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void delete(final ConnectorRequestContext context, final QualifiedName name) {
// check exists then delete in non-transactional optimistic manner
if (!exists(context, name)) {
throw new DatabaseNotFoundException(name);
}
try {
this.polarisStoreService.deleteDatabase(name.getDatabaseName());
} catch (DataIntegrityViolationException exception) {
throw new InvalidMetaException(name, exception);
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed deleting polaris database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void update(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
final QualifiedName name = databaseInfo.getName();
try {
final PolarisDatabaseEntity db = polarisStoreService.getDatabase(name.getDatabaseName())
.orElseThrow(() -> new DatabaseNotFoundException(name));
// currently db objects have no mutable fields so this is noop
db.getAudit().setLastModifiedBy(PolarisUtils.getUserOrDefault(context));
polarisStoreService.saveDatabase(db.toBuilder().build());
} catch (DatabaseNotFoundException exception) {
log.error(String.format("Not found exception for polaris database %s", name), exception);
throw exception;
} catch (DataIntegrityViolationException exception) {
throw new InvalidMetaException(name, exception);
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed updating polaris database %s", databaseInfo.getName()), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public DatabaseInfo get(final ConnectorRequestContext context, final QualifiedName name) {
try {
final PolarisDatabaseMapper mapper = new PolarisDatabaseMapper(name.getCatalogName());
final PolarisDatabaseEntity db = polarisStoreService.getDatabase(name.getDatabaseName())
.orElseThrow(() -> new DatabaseNotFoundException(name));
return mapper.toInfo(db);
} catch (DatabaseNotFoundException exception) {
log.error(String.format("Not found exception for polaris database %s", name), exception);
throw exception;
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed get polaris database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
try {
return polarisStoreService.getDatabase(name.getDatabaseName()).isPresent();
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed exists polaris database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext context,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
List<QualifiedName> qualifiedNames = polarisStoreService.getAllDatabases().stream()
.map(d -> QualifiedName.ofDatabase(name.getCatalogName(), d.getDbName()))
.collect(Collectors.toCollection(ArrayList::new));
if (prefix != null) {
qualifiedNames = qualifiedNames.stream()
.filter(n -> n.startsWith(prefix))
.collect(Collectors.toCollection(ArrayList::new));
}
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed databases list names polaris prefix %s", prefix), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<DatabaseInfo> list(
final ConnectorRequestContext context,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final PolarisDatabaseMapper mapper = new PolarisDatabaseMapper(name.getCatalogName());
List<PolarisDatabaseEntity> dbs = polarisStoreService.getAllDatabases();
if (prefix != null) {
dbs = dbs.stream()
.filter(n -> QualifiedName.ofDatabase(name.getCatalogName(), n.getDbName()).startsWith(prefix))
.collect(Collectors.toCollection(ArrayList::new));
}
if (sort != null) {
ConnectorUtils.sort(dbs, sort, Comparator.comparing(p -> p.getDbName()));
}
return ConnectorUtils.paginate(dbs, pageable).stream()
.map(d -> mapper.toInfo(d)).collect(Collectors.toList());
} catch (Exception exception) {
throw new ConnectorException(
String.format("Failed databases list polaris prefix %s", prefix), exception);
}
}
}
| 2,031 |
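Editorial note: the create and delete paths above use a non-transactional check-then-act pattern, so two concurrent creators can both pass the exists() check; the store's unique constraint then surfaces as a DataIntegrityViolationException, remapped to InvalidMetaException. A minimal caller-side sketch of tolerating that race, assuming hypothetical catalog and database names and that the class lives where it can see PolarisConnectorDatabaseService:
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
public final class CreateDatabaseExample {
    private CreateDatabaseExample() {
    }
    /**
     * Creates a database, treating a lost race between exists() and
     * createDatabase() as a benign "already exists" outcome.
     */
    public static void createIfAbsent(
        final PolarisConnectorDatabaseService service,
        final ConnectorRequestContext context
    ) {
        final DatabaseInfo info = DatabaseInfo.builder()
            .name(QualifiedName.ofDatabase("polaris", "example_db")) // hypothetical names
            .build(); // uri omitted: the service falls back to its default location prefix
        try {
            service.create(context, info);
        } catch (DatabaseAlreadyExistsException | InvalidMetaException e) {
            // InvalidMetaException can also signal a lost create race (unique key
            // violation), so both are treated as "someone else created it first".
        }
    }
}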
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorFactory.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.SpringConnectorFactory;
import com.netflix.metacat.connector.polaris.configs.PolarisConnectorConfig;
import com.netflix.metacat.connector.polaris.configs.PolarisPersistenceConfig;
import org.springframework.core.env.MapPropertySource;
import java.util.Collections;
/**
* Connector Factory for Polaris.
*/
class PolarisConnectorFactory extends SpringConnectorFactory {
/**
* Constructor.
*
* @param infoConverter info converter
* @param connectorContext connector config
*/
PolarisConnectorFactory(
final ConnectorInfoConverter infoConverter,
final ConnectorContext connectorContext
) {
super(infoConverter, connectorContext);
super.registerClazz(PolarisConnectorConfig.class,
PolarisPersistenceConfig.class);
super.addEnvProperties(new MapPropertySource(
"polaris_connector", Collections.unmodifiableMap(connectorContext.getConfiguration())));
super.refresh();
}
@Override
public ConnectorPartitionService getPartitionService() {
return ctx.getBean(PolarisConnectorPartitionService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorDatabaseService getDatabaseService() {
return this.ctx.getBean(PolarisConnectorDatabaseService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTableService getTableService() {
return this.ctx.getBean(PolarisConnectorTableService.class);
}
}
| 2,032 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisExceptionMapper.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Class to convert Iceberg client exceptions to connector exceptions.
*/
public class PolarisExceptionMapper {
/**
* Convert the given Iceberg exception to a ConnectorException.
*
* @param e The Iceberg client exception
* @param name The fully qualified name of the resource attempted to be accessed or modified at time of error
     * @return A connector exception wrapping the given exception
*/
public ConnectorException toConnectorException(
@Nonnull @NonNull final Exception e,
@Nonnull @NonNull final QualifiedName name
) {
        // TODO: handle more exception types with specific connector exceptions
        return new ConnectorException(e.getMessage(), e);
}
}
| 2,033 |
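Editorial note: the TODO above leaves every exception mapped to a generic ConnectorException. A hedged sketch of what a richer mapping might look like, assuming Iceberg's NoSuchTableException and AlreadyExistsException are the client exceptions of interest; this is illustrative, not the connector's actual mapping:
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NoSuchTableException;
public final class PolarisExceptionMapping {
    private PolarisExceptionMapping() {
    }
    /**
     * Maps a few well-known Iceberg client exceptions to specific metacat
     * exceptions, falling back to a generic ConnectorException.
     */
    public static RuntimeException toSpecificException(final Exception e, final QualifiedName name) {
        if (e instanceof NoSuchTableException) {
            return new TableNotFoundException(name);
        }
        if (e instanceof AlreadyExistsException) {
            return new TableAlreadyExistsException(name);
        }
        return new ConnectorException(e.getMessage(), e);
    }
}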
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorTableService.java
|
package com.netflix.metacat.connector.polaris;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableWrapper;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.metacat.connector.polaris.common.PolarisUtils;
import com.netflix.metacat.connector.polaris.mappers.PolarisTableMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.dao.DataIntegrityViolationException;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
 * Table service for polaris connector.
*/
@Slf4j
public class PolarisConnectorTableService implements ConnectorTableService {
protected final PolarisStoreService polarisStoreService;
protected final PolarisConnectorDatabaseService polarisConnectorDatabaseService;
protected final HiveConnectorInfoConverter connectorConverter;
protected final ConnectorContext connectorContext;
protected final IcebergTableHandler icebergTableHandler;
protected final PolarisTableMapper polarisTableMapper;
protected final String catalogName;
/**
* Constructor.
*
     * @param polarisStoreService polaris store service
* @param catalogName catalog name
* @param polarisConnectorDatabaseService connector database service
* @param connectorConverter converter
* @param icebergTableHandler iceberg table handler
     * @param polarisTableMapper polaris table mapper
* @param connectorContext the connector context
*/
public PolarisConnectorTableService(
final PolarisStoreService polarisStoreService,
final String catalogName,
final PolarisConnectorDatabaseService polarisConnectorDatabaseService,
final HiveConnectorInfoConverter connectorConverter,
final IcebergTableHandler icebergTableHandler,
final PolarisTableMapper polarisTableMapper,
final ConnectorContext connectorContext
) {
this.polarisStoreService = polarisStoreService;
this.polarisConnectorDatabaseService = polarisConnectorDatabaseService;
this.connectorConverter = connectorConverter;
this.connectorContext = connectorContext;
this.icebergTableHandler = icebergTableHandler;
this.polarisTableMapper = polarisTableMapper;
this.catalogName = catalogName;
}
/**
* {@inheritDoc}.
*/
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
final QualifiedName name = tableInfo.getName();
final String createdBy = PolarisUtils.getUserOrDefault(requestContext);
// check exists then create in non-transactional optimistic manner
if (exists(requestContext, name)) {
throw new TableAlreadyExistsException(name);
}
try {
final PolarisTableEntity entity = polarisTableMapper.toEntity(tableInfo);
polarisStoreService.createTable(entity.getDbName(), entity.getTblName(),
entity.getMetadataLocation(), createdBy);
} catch (DataIntegrityViolationException | InvalidMetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (Exception exception) {
final String msg = String.format("Failed creating polaris table %s", name);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void rename(
final ConnectorRequestContext context,
final QualifiedName oldName,
final QualifiedName newName
) {
// check exists then rename in non-transactional optimistic manner
if (exists(context, newName)) {
throw new TableAlreadyExistsException(newName);
}
try {
final String lastModifiedBy = PolarisUtils.getUserOrDefault(context);
final PolarisTableEntity table = polarisStoreService
.getTable(oldName.getDatabaseName(), oldName.getTableName())
.orElseThrow(() -> new TableNotFoundException(oldName));
table.getAudit().setLastModifiedBy(lastModifiedBy);
polarisStoreService.saveTable(table.toBuilder().tblName(newName.getTableName()).build());
} catch (TableNotFoundException exception) {
log.error(String.format("Not found exception for polaris table %s", oldName), exception);
throw exception;
} catch (DataIntegrityViolationException exception) {
throw new InvalidMetaException(oldName, exception);
} catch (Exception exception) {
final String msg = String.format("Failed renaming polaris table %s", oldName);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final PolarisTableEntity polarisTableEntity = polarisStoreService
.getTable(name.getDatabaseName(), name.getTableName())
.orElseThrow(() -> new TableNotFoundException(name));
final TableInfo info = polarisTableMapper.toInfo(polarisTableEntity);
final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
// Return the iceberg table with just the metadata location included if requested.
if (connectorContext.getConfig().shouldFetchOnlyMetadataLocationEnabled()
&& requestContext.isIncludeMetadataLocationOnly()) {
return TableInfo.builder()
.metadata(Maps.newHashMap(info.getMetadata()))
.fields(Collections.emptyList())
.build();
}
return getIcebergTable(name, tableLoc, info,
requestContext.isIncludeMetadata(), connectorContext.getConfig().isIcebergCacheEnabled());
} catch (TableNotFoundException | IllegalArgumentException exception) {
log.error(String.format("Not found exception for polaris table %s", name), exception);
throw exception;
} catch (ConnectorException connectorException) {
log.error("Encountered connector exception for polaris table {}. {}", name, connectorException);
throw connectorException;
} catch (Exception exception) {
final String msg = String.format("Failed getting polaris table %s", name);
log.error(msg, exception);
throw exception;
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : "";
for (String tableName : polarisStoreService.getTables(name.getDatabaseName(), tableFilter)) {
final QualifiedName qualifiedName =
QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
qualifiedNames.add(qualifiedName);
}
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (Exception exception) {
final String msg = String.format("Failed polaris list table names %s using prefix %s", name, prefix);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
final QualifiedName name = tableInfo.getName();
final Config conf = connectorContext.getConfig();
final String lastModifiedBy = PolarisUtils.getUserOrDefault(requestContext);
icebergTableHandler.update(tableInfo);
try {
final Map<String, String> newTableMetadata = tableInfo.getMetadata();
if (MapUtils.isEmpty(newTableMetadata)) {
log.warn("No parameters defined for iceberg table %s, no data update needed", name);
return;
}
final String prevLoc = newTableMetadata.get(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION);
final String newLoc = newTableMetadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
if (StringUtils.isBlank(prevLoc)) {
log.info("Provided previous {} empty for {} with new {}, treating as no location update needed.",
prevLoc, name, newLoc);
return;
}
if (StringUtils.isBlank(newLoc)) {
                final String message = String.format(
                    "Invalid metadata for %s. Provided new metadata location is empty (previous: %s).",
                    name, prevLoc);
log.error(message);
throw new InvalidMetaException(name, message, null);
}
if (conf.isIcebergPreviousMetadataLocationCheckEnabled()
&& !icebergTableHandler.doesMetadataLocationExist(name, prevLoc)) {
                final String message = String.format(
                    "Provided previous metadata location: %s for table: %s does not exist.",
                    prevLoc, name);
log.error(message);
throw new InvalidMetaException(name, message, null);
}
// optimistically attempt to update metadata location
final boolean updated = polarisStoreService.updateTableMetadataLocation(
name.getDatabaseName(), name.getTableName(),
prevLoc, newLoc, lastModifiedBy);
// if succeeded then done, else try to figure out why and throw corresponding exception
if (updated) {
requestContext.setIgnoreErrorsAfterUpdate(true);
log.warn("Success servicing Iceberg commit request for table: {}, "
+ "previousLocation: {}, newLocation: {}",
tableInfo.getName(), prevLoc, newLoc);
return;
}
final PolarisTableEntity table = polarisStoreService
.getTable(name.getDatabaseName(), name.getTableName())
.orElseThrow(() -> new TableNotFoundException(name));
final String existingLoc = table.getMetadataLocation();
log.warn("Error servicing Iceberg commit request for tableId: {}, "
+ "previousLocation: {}, existingLocation: {}, newLocation: {}",
table.getTblId(), prevLoc, existingLoc, newLoc);
if (StringUtils.isBlank(existingLoc)) {
final String message = String.format(
"Invalid metadata location for %s existing location is empty.", name);
log.error(message);
throw new TablePreconditionFailedException(name, message, existingLoc, prevLoc);
}
if (StringUtils.equalsIgnoreCase(existingLoc, newLoc)) {
log.warn("Existing metadata location is the same as new. Existing: {}, New: {}",
existingLoc, newLoc);
return;
}
if (!Objects.equals(existingLoc, prevLoc)) {
final String message = String.format(
"Invalid metadata location for %s expected: %s, provided: %s", name, existingLoc, prevLoc);
log.error(message);
throw new TablePreconditionFailedException(name, message, existingLoc, prevLoc);
}
} catch (TableNotFoundException | InvalidMetaException | TablePreconditionFailedException exception) {
throw exception;
} catch (DataIntegrityViolationException exception) {
throw new InvalidMetaException(name, exception);
} catch (Exception exception) {
final String msg = String.format("Failed updating polaris table %s", tableInfo.getName());
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
return polarisStoreService.tableExists(name.getDatabaseName(), name.getTableName());
} catch (Exception exception) {
final String msg = String.format("Failed exists polaris table %s", name);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
// check exists then delete in non-transactional optimistic manner
if (!exists(requestContext, name)) {
throw new TableNotFoundException(name);
}
try {
polarisStoreService.deleteTable(name.getDatabaseName(), name.getTableName());
} catch (DataIntegrityViolationException exception) {
throw new InvalidMetaException(name, exception);
} catch (Exception exception) {
final String msg = String.format("Failed deleting polaris table %s", name);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<TableInfo> list(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : "";
final List<PolarisTableEntity> tbls =
polarisStoreService.getTableEntities(name.getDatabaseName(), tableFilter);
if (sort != null) {
ConnectorUtils.sort(tbls, sort, Comparator.comparing(t -> t.getTblName()));
}
return ConnectorUtils.paginate(tbls, pageable).stream()
.map(t -> polarisTableMapper.toInfo(t)).collect(Collectors.toList());
} catch (Exception exception) {
final String msg = String.format("Failed polaris list tables %s using prefix %s", name, prefix);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
/**
* Return the table metadata from cache if exists else make the iceberg call and refresh it.
* @param tableName table name
* @param tableMetadataLocation table metadata location
* @param info table info stored in hive metastore
* @param includeInfoDetails if true, will include more details like the manifest file content
* @param useCache true, if table can be retrieved from cache
* @return TableInfo
*/
@Cacheable(key = "'iceberg.table.' + #includeInfoDetails + '.' + #tableMetadataLocation", condition = "#useCache")
public TableInfo getIcebergTable(final QualifiedName tableName,
final String tableMetadataLocation,
final TableInfo info,
final boolean includeInfoDetails,
final boolean useCache) {
final IcebergTableWrapper icebergTable =
this.icebergTableHandler.getIcebergTable(tableName, tableMetadataLocation, includeInfoDetails);
return connectorConverter.fromIcebergTableToTableInfo(tableName, icebergTable, tableMetadataLocation, info);
}
@Override
public List<QualifiedName> getTableNames(
final ConnectorRequestContext context,
final QualifiedName name,
final String filter,
@Nullable final Integer limit) {
try {
if (!Strings.isNullOrEmpty(filter)) {
// workaround for trino issue, hive param filters not supported on iceberg tables
log.warn(String.format("Calling Polaris getTableNames with nonempty filter %s", filter));
}
final List<String> databaseNames = name.isDatabaseDefinition() ? ImmutableList.of(name.getDatabaseName())
: polarisStoreService.getAllDatabases().stream().map(d -> d.getDbName()).collect(Collectors.toList());
int limitSize = limit == null || limit < 0 ? Integer.MAX_VALUE : limit;
final List<QualifiedName> result = Lists.newArrayList();
for (int i = 0; i < databaseNames.size() && limitSize > 0; i++) {
final String databaseName = databaseNames.get(i);
final List<String> tableNames = polarisStoreService.getTables(name.getDatabaseName(), "");
result.addAll(tableNames.stream()
.map(n -> QualifiedName.ofTable(name.getCatalogName(), databaseName, n))
.limit(limitSize)
.collect(Collectors.toList()));
limitSize = limitSize - tableNames.size();
}
return result;
} catch (Exception exception) {
final String msg = String.format("Failed polaris get table names using %s", name);
log.error(msg, exception);
throw new ConnectorException(msg, exception);
}
}
}
| 2,034 |
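Editorial note: update() above implements the Iceberg commit as a single compare-and-swap on the metadata location, and on failure it reads the current location to distinguish "already committed" from a stale precondition. A client-side sketch of a bounded commit loop reacting to TablePreconditionFailedException by rebasing and retrying; the rebase callback and names are hypothetical:
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import java.util.function.UnaryOperator;
public final class CommitRetryExample {
    private CommitRetryExample() {
    }
    /**
     * Attempts the metadata-location swap a bounded number of times; on a
     * precondition failure the caller rebases its metadata on the current
     * location via the (hypothetical) rebase callback and tries again.
     */
    public static void commitWithRetry(
        final PolarisConnectorTableService tableService,
        final ConnectorRequestContext context,
        final TableInfo initialTableInfo,
        final UnaryOperator<TableInfo> rebase,
        final int maxAttempts
    ) {
        TableInfo tableInfo = initialTableInfo;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                tableService.update(context, tableInfo);
                return; // swap succeeded or was a no-op
            } catch (TablePreconditionFailedException e) {
                if (attempt == maxAttempts) {
                    throw e;
                }
                tableInfo = rebase.apply(tableInfo); // recompute against the current location
            }
        }
    }
}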
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/PolarisConnectorPlugin.java
|
package com.netflix.metacat.connector.polaris;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Polaris Connector Plugin.
*/
public class PolarisConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "polaris";
private static final HiveTypeConverter TYPE_CONVERTER = new HiveTypeConverter();
private static final HiveConnectorInfoConverter INFO_CONVERTER
= new HiveConnectorInfoConverter(TYPE_CONVERTER);
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new PolarisConnectorFactory(INFO_CONVERTER, connectorContext);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorInfoConverter getInfoConverter() {
return INFO_CONVERTER;
}
}
| 2,035 |
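Editorial note: for orientation, a hedged sketch of how the plugin is typically exercised by the hosting server — the plugin is matched by its type string, a factory is created per catalog, and services are pulled from the factory. The ConnectorContext is assumed to be supplied by the server:
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
public final class PluginUsageExample {
    private PluginUsageExample() {
    }
    public static ConnectorTableService tableServiceFor(final ConnectorContext connectorContext) {
        final PolarisConnectorPlugin plugin = new PolarisConnectorPlugin();
        // "polaris" is the type string matched against catalog configuration.
        assert "polaris".equals(plugin.getType());
        final ConnectorFactory factory = plugin.create(connectorContext);
        return factory.getTableService();
    }
}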
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris connector classes.
*/
package com.netflix.metacat.connector.polaris;
| 2,036 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/PolarisConnectorConsts.java
|
package com.netflix.metacat.connector.polaris.common;
/**
* Polaris connector consts.
*/
public final class PolarisConnectorConsts {
/**
* Max number of client-side retries for CRDB txns.
*/
public static final int MAX_CRDB_TXN_RETRIES = 5;
/**
* Default Ctor.
*/
private PolarisConnectorConsts() {
}
}
| 2,037 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/TransactionRetryAspect.java
|
package com.netflix.metacat.connector.polaris.common;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.monitoring.Metrics;
import lombok.extern.slf4j.Slf4j;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.core.Ordered;
import org.springframework.retry.RetryException;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import java.sql.SQLException;
/**
* Aspect for client-side transaction retries.
*/
@Aspect
@Slf4j
public class TransactionRetryAspect implements Ordered {
private static final String SQLSTATE_RETRY_TRANSACTION = "40001";
private final RetryTemplate retryTemplate;
private final ConnectorContext connectorContext;
/**
* Constructor.
*
* @param retryTemplate retry template.
* @param connectorContext the connector context.
*/
public TransactionRetryAspect(final RetryTemplate retryTemplate,
final ConnectorContext connectorContext) {
this.retryTemplate = retryTemplate;
this.connectorContext = connectorContext;
}
/**
     * Around advice that retries transactional methods in Polaris persistence classes.
     *
     * @param pjp join point
* @return data results
* @throws Exception data exception
*/
@Around(value = "@annotation(org.springframework.transaction.annotation.Transactional)"
+ "&& within(com.netflix.metacat.connector.polaris.store..*)")
public Object retry(final ProceedingJoinPoint pjp) throws Exception {
return retryOnError(pjp);
}
private Object retryOnError(final ProceedingJoinPoint pjp) throws Exception {
return retryTemplate.<Object, Exception>execute(context -> {
try {
return pjp.proceed();
} catch (Throwable t) {
if (!TransactionSynchronizationManager.isActualTransactionActive() && isRetryError(t)) {
log.warn("Transaction failed with retry error: {}", t.getMessage());
connectorContext.getRegistry().counter(
Metrics.CounterTransactionRetryFailure.getMetricName()).increment();
throw new RetryException("TransactionRetryError", t);
}
throw new RuntimeException(t);
}
});
}
private boolean isRetryError(final Throwable t) {
for (Throwable ex : Throwables.getCausalChain(t)) {
if (ex instanceof SQLException && SQLSTATE_RETRY_TRANSACTION.equals(((SQLException) ex).getSQLState())) {
return true;
}
}
return false;
}
@Override
public int getOrder() {
return 99;
}
}
| 2,038 |
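Editorial note: a minimal self-contained sketch of the SQLSTATE detection the aspect relies on, re-implemented with a plain getCause() walk instead of Guava's getCausalChain so it runs standalone; 40001 is CockroachDB's serialization-failure code, and the wrapping layers here are contrived:
import java.sql.SQLException;
public final class RetryErrorExample {
    private RetryErrorExample() {
    }
    public static void main(final String[] args) {
        // CRDB reports serialization conflicts with SQLSTATE 40001; the aspect walks
        // the causal chain, so the SQLException may be wrapped arbitrarily deep.
        final SQLException conflict = new SQLException("restart transaction", "40001");
        final RuntimeException wrapped = new RuntimeException(new IllegalStateException(conflict));
        boolean retryable = false;
        for (Throwable t = wrapped; t != null; t = t.getCause()) {
            if (t instanceof SQLException && "40001".equals(((SQLException) t).getSQLState())) {
                retryable = true;
                break;
            }
        }
        System.out.println("retryable = " + retryable); // prints: retryable = true
    }
}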
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/PolarisUtils.java
|
package com.netflix.metacat.connector.polaris.common;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import org.apache.commons.lang3.StringUtils;
/**
* Polaris connector utils.
*/
public final class PolarisUtils {
/**
* Default metacat user.
*/
public static final String DEFAULT_METACAT_USER = "metacat_user";
/**
* Default Ctor.
*/
private PolarisUtils() {
}
/**
* Get the user name from the request context or
* a default one if missing.
* @param context The request context.
* @return the user name.
*/
public static String getUserOrDefault(final ConnectorRequestContext context) {
final String userName = context.getUserName();
return StringUtils.isNotBlank(userName) ? userName : DEFAULT_METACAT_USER;
}
}
| 2,039 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/common/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris connector common classes.
*/
package com.netflix.metacat.connector.polaris.common;
| 2,040 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/configs/PolarisPersistenceConfig.java
|
package com.netflix.metacat.connector.polaris.configs;
import com.netflix.metacat.connector.polaris.store.PolarisStoreConnector;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import com.netflix.metacat.connector.polaris.store.repos.PolarisDatabaseRepository;
import com.netflix.metacat.connector.polaris.store.repos.PolarisTableRepository;
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.boot.autoconfigure.ImportAutoConfiguration;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.autoconfigure.jdbc.DataSourceTransactionManagerAutoConfiguration;
import org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration;
import org.springframework.boot.autoconfigure.transaction.TransactionAutoConfiguration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.sql.DataSource;
/**
* The Polaris Store Persistence config.
*
*/
@Configuration
@EntityScan("com.netflix.metacat.connector.polaris.store.entities")
@EnableJpaRepositories("com.netflix.metacat.connector.polaris.store.repos")
@EnableJpaAuditing
@EnableTransactionManagement(proxyTargetClass = true)
@ImportAutoConfiguration({DataSourceAutoConfiguration.class,
DataSourceTransactionManagerAutoConfiguration.class, HibernateJpaAutoConfiguration.class,
TransactionAutoConfiguration.class})
public class PolarisPersistenceConfig {
/**
* Primary datasource. Since connectors can have data sources configured, polaris store JPA needs to be
* explicitly configured.
*
* @param dataSourceProperties datasource properties
* @return Datasource
*/
@Bean
@ConfigurationProperties(prefix = "spring.datasource.hikari")
public DataSource dataSource(final DataSourceProperties dataSourceProperties) {
return dataSourceProperties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
}
/**
* Datasource properties.
*
* @return DataSourceProperties
*/
@Bean
@Primary
@ConfigurationProperties("spring.datasource")
public DataSourceProperties dataSourceProperties() {
return new DataSourceProperties();
}
/**
* Get an implementation of {@link PolarisStoreConnector}.
*
* @param repo - PolarisDatabaseRepository
* @param tblRepo - PolarisTableRepository
* @return PolarisStoreConnector
*/
@Bean
public PolarisStoreService polarisStoreService(
final PolarisDatabaseRepository repo, final PolarisTableRepository tblRepo) {
return new PolarisStoreConnector(repo, tblRepo);
}
}
| 2,041 |
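Editorial note: a hedged sketch of wiring the two datasource beans above by hand, outside Spring, to make their relationship concrete. The JDBC URL and credentials are placeholders, not values this connector ships with:
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import javax.sql.DataSource;
public final class DataSourceWiringExample {
    private DataSourceWiringExample() {
    }
    public static DataSource build() {
        final DataSourceProperties props = new DataSourceProperties();
        props.setUrl("jdbc:postgresql://localhost:26257/polaris"); // placeholder CRDB endpoint
        props.setUsername("polaris_user"); // placeholder
        props.setPassword("change_me"); // placeholder
        // Mirrors dataSource(...) above: the properties seed a Hikari-backed pool.
        return props.initializeDataSourceBuilder().type(HikariDataSource.class).build();
    }
}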
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/configs/PolarisConnectorConfig.java
|
package com.netflix.metacat.connector.polaris.configs;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteria;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteriaImpl;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpWrapper;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpsProxy;
import com.netflix.metacat.connector.polaris.PolarisConnectorDatabaseService;
import com.netflix.metacat.connector.polaris.PolarisConnectorPartitionService;
import com.netflix.metacat.connector.polaris.PolarisConnectorTableService;
import com.netflix.metacat.connector.polaris.common.PolarisConnectorConsts;
import com.netflix.metacat.connector.polaris.common.TransactionRetryAspect;
import com.netflix.metacat.connector.polaris.mappers.PolarisTableMapper;
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.retry.RetryException;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
/**
* Config for polaris connector.
*/
public class PolarisConnectorConfig {
/**
* Creates a new instance of a polaris connector partition service.
*
* @param icebergTableHandler iceberg table handler
* @param connectorContext connector context
* @param polarisTableService polaris table service
* @return PolarisConnectorPartitionService
*/
@Bean
public PolarisConnectorPartitionService polarisConnectorPartitionService(
final IcebergTableHandler icebergTableHandler,
final ConnectorContext connectorContext,
final PolarisConnectorTableService polarisTableService) {
return new PolarisConnectorPartitionService(connectorContext, icebergTableHandler, polarisTableService);
}
/**
* Create polaris connector database service.
*
* @param polarisStoreService polaris store service
* @param connectorContext connector context
* @return PolarisConnectorDatabaseService
*/
@Bean
@ConditionalOnMissingBean(PolarisConnectorDatabaseService.class)
public PolarisConnectorDatabaseService polarisDatabaseService(
final PolarisStoreService polarisStoreService,
final ConnectorContext connectorContext
) {
return new PolarisConnectorDatabaseService(polarisStoreService, connectorContext);
}
/**
* Create polaris connector table service.
*
     * @param polarisStoreService polaris store service
* @param connectorConverter connector converter
* @param connectorDatabaseService polaris database service
* @param icebergTableHandler iceberg table handler
* @param polarisTableMapper polaris table mapper
* @param connectorContext connector context
* @return PolarisConnectorTableService
*/
@Bean
@ConditionalOnMissingBean(PolarisConnectorTableService.class)
public PolarisConnectorTableService polarisTableService(
final PolarisStoreService polarisStoreService,
final HiveConnectorInfoConverter connectorConverter,
final PolarisConnectorDatabaseService connectorDatabaseService,
final IcebergTableHandler icebergTableHandler,
final PolarisTableMapper polarisTableMapper,
final ConnectorContext connectorContext
) {
return new PolarisConnectorTableService(
polarisStoreService,
connectorContext.getCatalogName(),
connectorDatabaseService,
connectorConverter,
icebergTableHandler,
polarisTableMapper,
connectorContext
);
}
/**
* Create PolarisTableMapper.
* @param connectorContext server context
* @return PolarisTableMapper.
*/
@Bean
public PolarisTableMapper polarisTableMapper(final ConnectorContext connectorContext) {
return new PolarisTableMapper(connectorContext.getCatalogName());
}
/**
* Create iceberg table handler.
* @param connectorContext server context
* @param icebergTableCriteria iceberg table criteria
* @param icebergTableOpWrapper iceberg table operation
* @param icebergTableOpsProxy IcebergTableOps proxy
* @return IcebergTableHandler
*/
@Bean
public IcebergTableHandler icebergTableHandler(final ConnectorContext connectorContext,
final IcebergTableCriteria icebergTableCriteria,
final IcebergTableOpWrapper icebergTableOpWrapper,
final IcebergTableOpsProxy icebergTableOpsProxy) {
return new IcebergTableHandler(
connectorContext,
icebergTableCriteria,
icebergTableOpWrapper,
icebergTableOpsProxy);
}
/**
* Create iceberg table criteria.
* @param connectorContext server context
* @return IcebergTableCriteria
*/
@Bean
public IcebergTableCriteria icebergTableCriteria(final ConnectorContext connectorContext) {
return new IcebergTableCriteriaImpl(connectorContext);
}
/**
* Create iceberg table operation wrapper.
* @param connectorContext server context
* @param threadServiceManager executor service
* @return IcebergTableOpWrapper
*/
@Bean
public IcebergTableOpWrapper icebergTableOpWrapper(final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager) {
return new IcebergTableOpWrapper(connectorContext, threadServiceManager);
}
/**
* Create thread service manager.
* @param connectorContext connector config
* @return ThreadServiceManager
*/
@Bean
public ThreadServiceManager threadServiceManager(final ConnectorContext connectorContext) {
return new ThreadServiceManager(connectorContext.getRegistry(), connectorContext.getConfig());
}
/**
* Create IcebergTableOps proxy.
* @return IcebergTableOpsProxy
*/
@Bean
public IcebergTableOpsProxy icebergTableOps() {
return new IcebergTableOpsProxy();
}
/**
* Retry template to use for transaction retries.
*
* @return The retry template bean.
*/
@Bean
public RetryTemplate transactionRetryTemplate() {
final RetryTemplate result = new RetryTemplate();
result.setRetryPolicy(new SimpleRetryPolicy(
PolarisConnectorConsts.MAX_CRDB_TXN_RETRIES,
new ImmutableMap.Builder<Class<? extends Throwable>, Boolean>()
.put(RetryException.class, true)
.build()));
result.setBackOffPolicy(new ExponentialBackOffPolicy());
return result;
}
/**
* Aspect advice for transaction retries.
*
* @param retryTemplate the transaction retry template.
* @param connectorContext the connector context.
* @return TransactionRetryAspect
*/
@Bean
public TransactionRetryAspect transactionRetryAspect(
@Qualifier("transactionRetryTemplate") final RetryTemplate retryTemplate,
final ConnectorContext connectorContext) {
return new TransactionRetryAspect(retryTemplate, connectorContext);
}
}
| 2,042 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/configs/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris config classes.
*/
package com.netflix.metacat.connector.polaris.configs;
| 2,043 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/InfoToEntityMapper.java
|
package com.netflix.metacat.connector.polaris.mappers;
/**
* Info to Entity mapper.
*
* @param <I> The info type to map from.
* @param <E> The entity type to map to.
*/
public interface InfoToEntityMapper<I, E> {
/**
* Maps an info object to an entity object.
*
* @param info The info object to map from.
* @return The result entity object.
*/
E toEntity(I info);
}
| 2,044 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/EntityToInfoMapper.java
|
package com.netflix.metacat.connector.polaris.mappers;
/**
* Entity to Info Mapper.
*
* @param <E> The entity type to map from.
* @param <I> The info type to map to.
*/
public interface EntityToInfoMapper<E, I> {
/**
* Maps an Entity to the Info object.
*
* @param entity The entity to map from.
* @return The result info object.
*/
I toInfo(E entity);
}
| 2,045 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/PolarisDatabaseMapper.java
|
package com.netflix.metacat.connector.polaris.mappers;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
/**
* Database object mapper implementations.
*/
public class PolarisDatabaseMapper implements
EntityToInfoMapper<PolarisDatabaseEntity, DatabaseInfo>,
InfoToEntityMapper<DatabaseInfo, PolarisDatabaseEntity> {
// TODO: this can be reworked if PolarisDatabaseEntity starts tracking catalog name
private final String catalogName;
/**
* Constructor.
* @param catalogName the catalog name
*/
public PolarisDatabaseMapper(final String catalogName) {
this.catalogName = catalogName;
}
/**
* {@inheritDoc}.
*/
@Override
public DatabaseInfo toInfo(final PolarisDatabaseEntity entity) {
final DatabaseInfo databaseInfo = DatabaseInfo.builder()
.name(QualifiedName.ofDatabase(catalogName, entity.getDbName()))
.uri(entity.getLocation())
.build();
return databaseInfo;
}
/**
* {@inheritDoc}.
*/
@Override
public PolarisDatabaseEntity toEntity(final DatabaseInfo info) {
final PolarisDatabaseEntity databaseEntity = PolarisDatabaseEntity.builder()
.dbName(info.getName().getDatabaseName())
.location(info.getUri())
.build();
return databaseEntity;
}
}
| 2,046 |
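Editorial note: a small round-trip sketch of the database mapper, using placeholder catalog, database, and location values; note that toEntity() carries no audit fields, so only name and location survive the round trip:
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
public final class DatabaseMapperExample {
    private DatabaseMapperExample() {
    }
    public static void main(final String[] args) {
        final PolarisDatabaseMapper mapper = new PolarisDatabaseMapper("polaris"); // placeholder catalog
        final DatabaseInfo info = DatabaseInfo.builder()
            .name(QualifiedName.ofDatabase("polaris", "example_db"))
            .uri("s3://bucket/warehouse/example_db") // placeholder location
            .build();
        final PolarisDatabaseEntity entity = mapper.toEntity(info);
        final DatabaseInfo roundTripped = mapper.toInfo(entity);
        System.out.println(roundTripped.getName() + " @ " + roundTripped.getUri());
    }
}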
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/PolarisTableMapper.java
|
package com.netflix.metacat.connector.polaris.mappers;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import java.sql.Date;
import java.util.Map;
/**
* Table object mapper implementations.
*/
public class PolarisTableMapper implements
EntityToInfoMapper<PolarisTableEntity, TableInfo>,
InfoToEntityMapper<TableInfo, PolarisTableEntity> {
private static final String PARAMETER_SPARK_SQL_PROVIDER = "spark.sql.sources.provider";
private static final String PARAMETER_EXTERNAL = "EXTERNAL";
private static final String PARAMETER_METADATA_PREFIX = "/metadata/";
private final String catalogName;
/**
* Constructor.
* @param catalogName the catalog name
*/
public PolarisTableMapper(final String catalogName) {
this.catalogName = catalogName;
}
/**
* {@inheritDoc}.
*/
@Override
public TableInfo toInfo(final PolarisTableEntity entity) {
final int uriIndex = entity.getMetadataLocation().indexOf(PARAMETER_METADATA_PREFIX);
final TableInfo tableInfo = TableInfo.builder()
.name(QualifiedName.ofTable(catalogName, entity.getDbName(), entity.getTblName()))
.metadata(ImmutableMap.of(
DirectSqlTable.PARAM_METADATA_LOCATION, entity.getMetadataLocation(),
PARAMETER_EXTERNAL, "TRUE", PARAMETER_SPARK_SQL_PROVIDER, "iceberg",
DirectSqlTable.PARAM_TABLE_TYPE, DirectSqlTable.ICEBERG_TABLE_TYPE))
.serde(StorageInfo.builder().inputFormat("org.apache.hadoop.mapred.FileInputFormat")
.outputFormat("org.apache.hadoop.mapred.FileOutputFormat")
.serializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
.uri(uriIndex > 0 ? entity.getMetadataLocation().substring(0, uriIndex) : "")
.build())
.auditInfo(AuditInfo.builder()
.createdBy(entity.getAudit().getCreatedBy())
.createdDate(Date.from(entity.getAudit().getCreatedDate()))
.lastModifiedBy(entity.getAudit().getLastModifiedBy())
.lastModifiedDate(Date.from(entity.getAudit().getLastModifiedDate()))
.build())
.build();
return tableInfo;
}
/**
* {@inheritDoc}.
*/
@Override
public PolarisTableEntity toEntity(final TableInfo info) {
final Map<String, String> metadata = info.getMetadata();
if (MapUtils.isEmpty(metadata)) {
final String message = String.format("No parameters defined for iceberg table %s", info.getName());
throw new InvalidMetaException(info.getName(), message, null);
}
final String location = metadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
if (StringUtils.isEmpty(location)) {
final String message = String.format("No metadata location defined for iceberg table %s", info.getName());
throw new InvalidMetaException(info.getName(), message, null);
}
final PolarisTableEntity tableEntity = PolarisTableEntity.builder()
.dbName(info.getName().getDatabaseName())
.tblName(info.getName().getTableName())
.metadataLocation(location)
.build();
return tableEntity;
}
}
| 2,047 |
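Editorial note: the table mapper above requires metadata_location in the table parameters and rejects tables without it. A hedged sketch of both paths, with placeholder names and locations:
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
public final class TableMapperExample {
    private TableMapperExample() {
    }
    public static void main(final String[] args) {
        final PolarisTableMapper mapper = new PolarisTableMapper("polaris"); // placeholder catalog
        final TableInfo valid = TableInfo.builder()
            .name(QualifiedName.ofTable("polaris", "example_db", "example_tbl"))
            .metadata(ImmutableMap.of(
                DirectSqlTable.PARAM_METADATA_LOCATION,
                "s3://bucket/warehouse/example_db/example_tbl/metadata/00000.metadata.json"))
            .build();
        final PolarisTableEntity entity = mapper.toEntity(valid);
        System.out.println(entity.getMetadataLocation());
        final TableInfo missingLocation = TableInfo.builder()
            .name(QualifiedName.ofTable("polaris", "example_db", "bad_tbl"))
            .metadata(ImmutableMap.of())
            .build();
        try {
            mapper.toEntity(missingLocation);
        } catch (InvalidMetaException expected) {
            // Empty parameters (or a missing metadata_location) are rejected, as above.
        }
    }
}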
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/mappers/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris mapper classes.
*/
package com.netflix.metacat.connector.polaris.mappers;
| 2,048 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/PolarisStoreService.java
|
package com.netflix.metacat.connector.polaris.store;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import java.util.List;
import java.util.Optional;
/**
* Interface methods for Polaris Store CRUD access.
*/
public interface PolarisStoreService {
/**
* Creates a database entry.
* @param databaseName database name
* @param location the database location.
* @param createdBy user creating this database.
* @return Polaris Database entity.
*/
PolarisDatabaseEntity createDatabase(String databaseName, String location, String createdBy);
/**
* Fetches database entry.
* @param databaseName database name
* @return Polaris Database entity
*/
Optional<PolarisDatabaseEntity> getDatabase(String databaseName);
/**
* Deletes the database entry.
* @param dbName database name.
*/
void deleteDatabase(String dbName);
/**
* Fetches all database entities.
* @return Polaris Database entities
*/
List<PolarisDatabaseEntity> getAllDatabases();
/**
* Checks if database with the name exists.
* @param databaseName database name to look up.
* @return true, if database exists. false, otherwise.
*/
boolean databaseExists(String databaseName);
/**
* Updates existing database entity.
* @param databaseEntity databaseEntity to save.
* @return the saved database entity.
*/
PolarisDatabaseEntity saveDatabase(PolarisDatabaseEntity databaseEntity);
/**
* Creates a table entry.
* @param dbName database name
* @param tableName table name
* @param metadataLocation metadata location
* @param createdBy user creating this table.
* @return Polaris Table entity.
*/
PolarisTableEntity createTable(String dbName, String tableName, String metadataLocation, String createdBy);
/**
* Fetches table entry.
* @param dbName database name
* @param tableName table name
* @return Polaris Table entity
*/
Optional<PolarisTableEntity> getTable(String dbName, String tableName);
/**
* Fetch table entities for given database.
* @param databaseName database name
* @param tableNamePrefix table name prefix. can be empty.
* @return table entities in the database.
*/
List<PolarisTableEntity> getTableEntities(final String databaseName, final String tableNamePrefix);
/**
* Updates existing or creates new table entry.
* @param tableEntity tableEntity to save.
* @return The saved entity.
*/
PolarisTableEntity saveTable(PolarisTableEntity tableEntity);
/**
* Deletes the table entry.
* @param dbName database name.
* @param tableName table name.
*/
void deleteTable(String dbName, String tableName);
/**
* Checks if table with the name exists.
* @param databaseName database name of the table to be looked up.
* @param tableName table name to look up.
* @return true, if table exists. false, otherwise.
*/
boolean tableExists(String databaseName, String tableName);
/**
     * Gets table names in the database matching the given table name prefix.
* @param databaseName database name
* @param tableNamePrefix table name prefix
* @return list of table names in the database with the table name prefix.
*/
List<String> getTables(String databaseName, String tableNamePrefix);
/**
* Do an atomic compare-and-swap to update the table's metadata location.
* @param databaseName database name of the table
* @param tableName table name
* @param expectedLocation expected current metadata-location of the table
* @param newLocation new metadata location of the table
* @param lastModifiedBy user updating the location.
* @return true, if update was successful. false, otherwise.
*/
boolean updateTableMetadataLocation(
String databaseName, String tableName,
String expectedLocation, String newLocation,
String lastModifiedBy);
}
| 2,049 |
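Editorial note: the compare-and-swap contract at the end of the interface is what makes Iceberg commits safe without a long-lived transaction — the update lands only if the stored location still equals expectedLocation. A minimal sketch against the interface; database, table, user, and locations are placeholders:
import com.netflix.metacat.connector.polaris.store.PolarisStoreService;
public final class CasUpdateExample {
    private CasUpdateExample() {
    }
    public static boolean commit(final PolarisStoreService store) {
        final String db = "example_db"; // placeholder
        final String tbl = "example_tbl"; // placeholder
        final String expected = "s3://bucket/warehouse/example_db/example_tbl/metadata/00001.metadata.json";
        final String next = "s3://bucket/warehouse/example_db/example_tbl/metadata/00002.metadata.json";
        final boolean swapped = store.updateTableMetadataLocation(db, tbl, expected, next, "metacat_user");
        if (!swapped) {
            // Lost the race: read back the current location to decide whether the
            // commit already happened (current == next) or is genuinely stale.
            store.getTable(db, tbl).ifPresent(t ->
                System.out.println("current location: " + t.getMetadataLocation()));
        }
        return swapped;
    }
}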
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/PolarisStoreConnector.java
|
package com.netflix.metacat.connector.polaris.store;
import com.netflix.metacat.connector.polaris.store.entities.AuditEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import com.netflix.metacat.connector.polaris.store.repos.PolarisDatabaseRepository;
import com.netflix.metacat.connector.polaris.store.repos.PolarisTableRepository;
import lombok.RequiredArgsConstructor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.domain.Sort;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* This class exposes APIs for CRUD operations.
*/
@Transactional(rollbackFor = Exception.class)
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class PolarisStoreConnector implements PolarisStoreService {
private final PolarisDatabaseRepository dbRepo;
private final PolarisTableRepository tblRepo;
/**
* Creates entry for new database.
     * @param databaseName database name
     * @param location the database location
     * @param createdBy user creating this database
     * @return entity
*/
@Override
public PolarisDatabaseEntity createDatabase(final String databaseName,
final String location,
final String createdBy) {
final PolarisDatabaseEntity e = new PolarisDatabaseEntity(databaseName, location, createdBy);
return dbRepo.save(e);
}
/**
* Fetches database entry.
*
* @param databaseName database name
* @return Polaris Database entity
*/
@Override
public Optional<PolarisDatabaseEntity> getDatabase(final String databaseName) {
return dbRepo.findByDbName(databaseName);
}
/**
* Deletes the database entry.
*
* @param dbName database name.
*/
@Override
public void deleteDatabase(final String dbName) {
dbRepo.deleteByName(dbName);
}
/**
* Fetches all database entities.
*
* @return Polaris Database entities
*/
@Override
@Transactional(propagation = Propagation.SUPPORTS)
public List<PolarisDatabaseEntity> getAllDatabases() {
final int pageFetchSize = 1000;
final List<PolarisDatabaseEntity> retval = new ArrayList<>();
Pageable page = PageRequest.of(0, pageFetchSize);
boolean hasNext;
do {
final Slice<PolarisDatabaseEntity> dbs = dbRepo.getDatabases(page);
retval.addAll(dbs.toList());
hasNext = dbs.hasNext();
if (hasNext) {
page = dbs.nextPageable();
}
} while (hasNext);
return retval;
}
/**
* Checks if database with the name exists.
*
* @param databaseName database name to look up.
* @return true, if database exists. false, otherwise.
*/
@Override
public boolean databaseExists(final String databaseName) {
return dbRepo.existsByDbName(databaseName);
}
/**
* Updates existing database entity, or creates a new one if not present.
*
* @param databaseEntity databaseEntity to save.
* @return the saved database entity.
*/
@Override
public PolarisDatabaseEntity saveDatabase(final PolarisDatabaseEntity databaseEntity) {
return dbRepo.save(databaseEntity);
}
boolean databaseExistsById(final String dbId) {
return dbRepo.existsById(dbId);
}
/**
* Creates entry for new table.
* @param dbName database name
* @param tableName table name
* @param metadataLocation metadata location of the table.
* @param createdBy user creating this table.
* @return entity corresponding to created table entry
*/
@Override
public PolarisTableEntity createTable(final String dbName,
final String tableName,
final String metadataLocation,
final String createdBy) {
final AuditEntity auditEntity = AuditEntity.builder()
.createdBy(createdBy)
.lastModifiedBy(createdBy)
.build();
final PolarisTableEntity e = PolarisTableEntity.builder()
.audit(auditEntity)
.dbName(dbName)
.tblName(tableName)
.metadataLocation(metadataLocation)
.build();
return tblRepo.save(e);
}
/**
* Fetches table entry.
*
     * @param dbName database name
     * @param tableName table name
* @return Polaris Table entity
*/
@Override
public Optional<PolarisTableEntity> getTable(final String dbName, final String tableName) {
return tblRepo.findByDbNameAndTblName(dbName, tableName);
}
/**
* Fetch table entities for given database.
* @param databaseName database name
* @param tableNamePrefix table name prefix. can be empty.
* @return table entities in the database.
*/
@Override
@Transactional(propagation = Propagation.SUPPORTS)
public List<PolarisTableEntity> getTableEntities(final String databaseName, final String tableNamePrefix) {
final int pageFetchSize = 1000;
final List<PolarisTableEntity> retval = new ArrayList<>();
final String tblPrefix = tableNamePrefix == null ? "" : tableNamePrefix;
Pageable page = PageRequest.of(0, pageFetchSize, Sort.by("tblName").ascending());
Slice<PolarisTableEntity> tbls;
boolean hasNext;
do {
tbls = tblRepo.findAllTablesByDbNameAndTablePrefix(databaseName, tblPrefix, page);
retval.addAll(tbls.toList());
hasNext = tbls.hasNext();
if (hasNext) {
page = tbls.nextPageable();
}
} while (hasNext);
return retval;
}
/**
* Updates existing table entry.
*
* @param tableEntity tableEntity to save.
* @return The saved entity.
*/
@Override
public PolarisTableEntity saveTable(final PolarisTableEntity tableEntity) {
return tblRepo.save(tableEntity);
}
/**
* Deletes entry for table.
* @param dbName database name
* @param tableName table name
*/
@Override
public void deleteTable(final String dbName, final String tableName) {
tblRepo.deleteByName(dbName, tableName);
}
/**
 * Checks if table with the name exists.
 *
 * @param databaseName database name of the table to look up.
 * @param tableName table name to look up.
 * @return true, if table exists. false, otherwise.
 */
@Override
public boolean tableExists(final String databaseName, final String tableName) {
return tblRepo.existsByDbNameAndTblName(databaseName, tableName);
}
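// Package-private helper: checks existence by the generated table id.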
boolean tableExistsById(final String tblId) {
return tblRepo.existsById(tblId);
}
/**
* Fetch table names for given database.
* @param databaseName database name
* @param tableNamePrefix table name prefix. can be empty.
* @return table names in the database.
*/
@Override
@Transactional(propagation = Propagation.SUPPORTS)
public List<String> getTables(final String databaseName, final String tableNamePrefix) {
final int pageFetchSize = 1000;
final List<String> retval = new ArrayList<>();
final String tblPrefix = tableNamePrefix == null ? "" : tableNamePrefix;
Pageable page = PageRequest.of(0, pageFetchSize, Sort.by("tblName").ascending());
Slice<String> tblNames;
boolean hasNext;
do {
tblNames = tblRepo.findAllByDbNameAndTablePrefix(databaseName, tblPrefix, page);
retval.addAll(tblNames.toList());
hasNext = tblNames.hasNext();
if (hasNext) {
page = tblNames.nextPageable();
}
} while (hasNext);
return retval;
}
/**
* Do an atomic compare-and-swap to update the table's metadata location.
*
* @param databaseName database name of the table
* @param tableName table name
* @param expectedLocation expected current metadata-location of the table
* @param newLocation new metadata location of the table
* @param lastModifiedBy user updating the location.
* @return true, if update was successful. false, otherwise.
*/
@Override
public boolean updateTableMetadataLocation(
final String databaseName, final String tableName,
final String expectedLocation, final String newLocation,
final String lastModifiedBy) {
final int updatedRowCount =
tblRepo.updateMetadataLocation(databaseName, tableName,
expectedLocation, newLocation, lastModifiedBy, Instant.now());
return updatedRowCount > 0;
}
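// Illustrative usage sketch (not part of this class): a commit path built on the
// compare-and-swap above would retry or surface a conflict when another writer
// wins the race. The store variable and names below are hypothetical.
//
//   final boolean swapped = store.updateTableMetadataLocation(
//       "db", "tbl", currentLocation, newLocation, "user");
//   if (!swapped) {
//       // expectedLocation was stale: reload the entity and report the conflict
//   }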
}
| 2,050 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris data classes.
*/
package com.netflix.metacat.connector.polaris.store;
| 2,051 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/repos/PolarisDatabaseRepository.java
|
package com.netflix.metacat.connector.polaris.store.repos;
import com.netflix.metacat.connector.polaris.store.entities.PolarisDatabaseEntity;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import org.springframework.stereotype.Repository;
import java.util.Optional;
/**
* JPA repository implementation for storing PolarisDatabaseEntity.
*/
@Repository
public interface PolarisDatabaseRepository extends JpaRepository<PolarisDatabaseEntity, String>,
JpaSpecificationExecutor {
/**
* Fetch database entry.
* @param dbName database name
* @return database entry, if found
*/
Optional<PolarisDatabaseEntity> findByDbName(@Param("dbName") final String dbName);
/**
* Check if database with that name exists.
* @param dbName database name to look up.
* @return true, if database exists. false, otherwise.
*/
boolean existsByDbName(@Param("dbName") final String dbName);
/**
* Delete database entry by name.
* @param dbName database name.
*/
@Modifying
@Query("DELETE FROM PolarisDatabaseEntity e WHERE e.dbName = :dbName")
void deleteByName(@Param("dbName") final String dbName);
/**
* Fetch databases.
* @param page pageable.
* @return database entities.
*/
@Query("SELECT e FROM PolarisDatabaseEntity e")
Slice<PolarisDatabaseEntity> getDatabases(Pageable page);
}
| 2,052 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/repos/PolarisTableRepository.java
|
package com.netflix.metacat.connector.polaris.store.repos;
import com.netflix.metacat.connector.polaris.store.entities.PolarisTableEntity;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Slice;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
import java.time.Instant;
import java.util.Optional;
/**
* JPA repository implementation for storing PolarisTableEntity.
*/
@Repository
public interface PolarisTableRepository extends JpaRepository<PolarisTableEntity, String>,
JpaSpecificationExecutor {
/**
* Delete table entry by name.
* @param dbName database name.
* @param tblName table name.
*/
@Modifying
@Query("DELETE FROM PolarisTableEntity e WHERE e.dbName = :dbName AND e.tblName = :tblName")
@Transactional
void deleteByName(
@Param("dbName") final String dbName,
@Param("tblName") final String tblName);
/**
* Fetch table names in database.
* @param dbName database name
* @param tableNamePrefix table name prefix. can be empty.
* @param page pageable.
* @return table names that belong to the database.
*/
@Query("SELECT e.tblName FROM PolarisTableEntity e WHERE e.dbName = :dbName AND e.tblName LIKE :tableNamePrefix%")
Slice<String> findAllByDbNameAndTablePrefix(
@Param("dbName") final String dbName,
@Param("tableNamePrefix") final String tableNamePrefix,
Pageable page);
/**
* Fetch table entry.
* @param dbName database name
* @param tblName table name
* @return optional table entry
*/
Optional<PolarisTableEntity> findByDbNameAndTblName(
@Param("dbName") final String dbName,
@Param("tblName") final String tblName);
/**
* Checks if table with the database name and table name exists.
* @param dbName database name of the table to be looked up.
* @param tblName table name to be looked up.
* @return true, if table exists. false, otherwise.
*/
boolean existsByDbNameAndTblName(
@Param("dbName") final String dbName,
@Param("tblName") final String tblName);
/**
* Fetch table entities in database.
* @param dbName database name
* @param tableNamePrefix table name prefix. can be empty.
* @param page pageable.
* @return table entities that belong to the database.
*/
@Query("SELECT e FROM PolarisTableEntity e WHERE e.dbName = :dbName AND e.tblName LIKE :tableNamePrefix%")
Slice<PolarisTableEntity> findAllTablesByDbNameAndTablePrefix(
@Param("dbName") final String dbName,
@Param("tableNamePrefix") final String tableNamePrefix,
Pageable page);
/**
* Do an atomic compare-and-swap on the metadata location of the table.
* @param dbName database name of the table
* @param tableName table name
* @param expectedLocation expected metadata location before the update is done.
* @param newLocation new metadata location of the table.
* @param lastModifiedBy user updating the location.
* @param lastModifiedDate timestamp for when the location was updated.
* @return number of rows that are updated.
*/
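// flushAutomatically pushes pending persistence-context changes to the database
// before the bulk update runs; clearAutomatically evicts entities afterwards so
// callers do not read stale, pre-update state.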
@Modifying(flushAutomatically = true, clearAutomatically = true)
@Query("UPDATE PolarisTableEntity t SET t.metadataLocation = :newLocation, "
+ "t.audit.lastModifiedBy = :lastModifiedBy, t.audit.lastModifiedDate = :lastModifiedDate, "
+ "t.previousMetadataLocation = t.metadataLocation, t.version = t.version + 1 "
+ "WHERE t.metadataLocation = :expectedLocation AND t.dbName = :dbName AND t.tblName = :tableName")
@Transactional
int updateMetadataLocation(
@Param("dbName") final String dbName,
@Param("tableName") final String tableName,
@Param("expectedLocation") final String expectedLocation,
@Param("newLocation") final String newLocation,
@Param("lastModifiedBy") final String lastModifiedBy,
@Param("lastModifiedDate") final Instant lastModifiedDate);
}
| 2,053 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/repos/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris repo classes.
*/
package com.netflix.metacat.connector.polaris.store.repos;
| 2,054 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/PolarisTableEntity.java
|
package com.netflix.metacat.connector.polaris.store.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.hibernate.annotations.GenericGenerator;
import org.springframework.data.jpa.domain.support.AuditingEntityListener;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.Entity;
import javax.persistence.EntityListeners;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Version;
/**
* Entity class for Table object.
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
@Builder(toBuilder = true)
@EqualsAndHashCode
@Entity
@ToString(callSuper = true)
@Table(name = "TBLS")
@EntityListeners(AuditingEntityListener.class)
public class PolarisTableEntity {
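// Optimistic-locking version column; JPA increments it on every update and
// rejects writes made against a stale version.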
@Version
private Long version;
@Basic
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(name = "id", nullable = false, unique = true, updatable = false)
private String tblId;
@Basic
@Column(name = "db_name", nullable = false, updatable = false)
private String dbName;
@Basic
@Setter
@Column(name = "tbl_name", nullable = false)
private String tblName;
@Basic
@Setter
@Column(name = "previous_metadata_location", nullable = true, updatable = true)
private String previousMetadataLocation;
@Basic
@Setter
@Column(name = "metadata_location", nullable = true, updatable = true)
private String metadataLocation;
@Embedded
private AuditEntity audit;
/**
* Constructor for Polaris Table Entity.
*
* @param dbName database name
* @param tblName table name
* @param createdBy user that created this entity.
*/
public PolarisTableEntity(final String dbName,
final String tblName,
final String createdBy) {
this.dbName = dbName;
this.tblName = tblName;
this.audit = AuditEntity
.builder()
.createdBy(createdBy)
.lastModifiedBy(createdBy)
.build();
}
}
| 2,055 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/PolarisDatabaseEntity.java
|
package com.netflix.metacat.connector.polaris.store.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.ToString;
import org.hibernate.annotations.GenericGenerator;
import org.springframework.data.jpa.domain.support.AuditingEntityListener;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.Entity;
import javax.persistence.EntityListeners;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Version;
/**
* Entity class for Database object.
*/
@Getter
@AllArgsConstructor
@NoArgsConstructor
@Builder(toBuilder = true)
@EqualsAndHashCode
@Entity
@ToString(callSuper = true)
@Table(name = "DBS")
@EntityListeners(AuditingEntityListener.class)
public class PolarisDatabaseEntity {
@Version
private Long version;
@Basic
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(name = "id", nullable = false, unique = true, updatable = false)
private String dbId;
@Basic
@Column(name = "name", nullable = false, unique = true, updatable = false)
private String dbName;
@Basic
@Column(name = "location", updatable = false)
private String location;
@Embedded
private AuditEntity audit;
/**
* Constructor for Polaris Database Entity.
*
* @param dbName database name
* @param location database location.
* @param createdBy user that created this entity.
*/
public PolarisDatabaseEntity(final String dbName,
final String location,
final String createdBy) {
this.dbName = dbName;
this.location = location;
this.audit = AuditEntity
.builder()
.createdBy(createdBy)
.lastModifiedBy(createdBy)
.build();
}
}
| 2,056 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/AuditEntity.java
|
package com.netflix.metacat.connector.polaris.store.entities;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.LastModifiedDate;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import java.time.Instant;
/**
* Embeddable audit entity.
*
* @author rveeramacheneni
*/
@Embeddable
@Getter
@Setter
@Builder
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode
@ToString(of = {
"createdBy",
"lastModifiedBy",
"createdDate",
"lastModifiedDate"
})
public class AuditEntity {
@Basic
@Column(name = "created_by")
private String createdBy;
@Basic
@Column(name = "last_updated_by")
private String lastModifiedBy;
@Basic
@Column(name = "created_date", updatable = false, nullable = false)
@CreatedDate
private Instant createdDate;
@Basic
@Column(name = "last_updated_date", nullable = false)
@LastModifiedDate
private Instant lastModifiedDate;
}
| 2,057 |
0 |
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store
|
Create_ds/metacat/metacat-connector-polaris/src/main/java/com/netflix/metacat/connector/polaris/store/entities/package-info.java
|
/*
*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Polaris entity classes.
*/
package com.netflix.metacat.connector.polaris.store.entities;
| 2,058 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeExceptionMapper.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import java.sql.SQLException;
/**
* Exception mapper for Snowflake SQLExceptions.
*
* @author amajumdar
* @see SQLException
* @see ConnectorException
* @since 1.2.0
*/
public class SnowflakeExceptionMapper implements JdbcExceptionMapper {
/**
* {@inheritDoc}
*/
@Override
public ConnectorException toConnectorException(
final SQLException se,
final QualifiedName name
) {
final int errorCode = se.getErrorCode();
switch (errorCode) {
case 2042: //database already exists
return new DatabaseAlreadyExistsException(name, se);
case 2002: //table already exists
return new TableAlreadyExistsException(name, se);
case 2043: //database does not exist
return new DatabaseNotFoundException(name, se);
case 2003: //table doesn't exist
return new TableNotFoundException(name, se);
default:
return new ConnectorException(se.getMessage(), se);
}
}
}
| 2,059 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorModule.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorDatabaseService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorPartitionService;
import javax.sql.DataSource;
import java.util.Map;
/**
* Guice module for the Snowflake Connector.
*
* @author amajumdar
* @since 1.2.0
*/
public class SnowflakeConnectorModule extends AbstractModule {
private final String catalogShardName;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param catalogShardName unique catalog shard name
* @param configuration connector configuration
*
*/
public SnowflakeConnectorModule(
final String catalogShardName,
final Map<String, String> configuration
) {
this.catalogShardName = catalogShardName;
this.configuration = configuration;
}
/**
* {@inheritDoc}
*/
@Override
protected void configure() {
this.bind(DataSource.class).toInstance(DataSourceManager.get()
.load(this.catalogShardName, this.configuration).get(this.catalogShardName));
this.bind(JdbcTypeConverter.class).to(SnowflakeTypeConverter.class).in(Scopes.SINGLETON);
this.bind(JdbcExceptionMapper.class).to(SnowflakeExceptionMapper.class).in(Scopes.SINGLETON);
this.bind(ConnectorDatabaseService.class)
.to(ConnectorUtils.getDatabaseServiceClass(this.configuration, JdbcConnectorDatabaseService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorTableService.class)
.to(ConnectorUtils.getTableServiceClass(this.configuration, SnowflakeConnectorTableService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorPartitionService.class)
.to(ConnectorUtils.getPartitionServiceClass(this.configuration, JdbcConnectorPartitionService.class))
.in(Scopes.SINGLETON);
}
}
| 2,060 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeTypeConverter.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.extern.slf4j.Slf4j;
/**
* Type converter for Snowflake.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
public class SnowflakeTypeConverter extends JdbcTypeConverter {
static final int DEFAULT_CHARACTER_LENGTH = 256;
private static final String DEFAULT_CHARACTER_LENGTH_STRING = Integer.toString(DEFAULT_CHARACTER_LENGTH);
/**
* {@inheritDoc}
*
* @see <a href="https://docs.snowflake.net/manuals/sql-reference/data-types.html">Snowflake Types</a>
*/
@Override
public Type toMetacatType(final String type) {
final String lowerType = type.toLowerCase();
// Split up the possible type: TYPE[(size, magnitude)] EXTRA
final String[] splitType = this.splitType(lowerType);
switch (splitType[0]) {
case "smallint":
case "tinyint":
case "byteint":
return BaseType.SMALLINT;
case "int":
case "integer":
return BaseType.INT;
case "bigint":
return BaseType.BIGINT;
case "number":
case "decimal":
case "numeric":
return this.toMetacatDecimalType(splitType);
case "real":
case "float4":
return BaseType.FLOAT;
case "double":
case "double precision":
case "float8":
case "float":
return BaseType.DOUBLE;
case "varchar":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatVarcharType(splitType);
case "text":
case "string":
// text and string are aliases for varchar; apply the converter's default length
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
return this.toMetacatVarcharType(splitType);
case "character":
case "char":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatCharType(splitType);
case "binary":
case "varbinary":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatVarbinaryType(splitType);
case "timestamp":
case "datetime":
case "timestamp_ntz":
case "timestampntz":
case "timestamp without time zone":
return this.toMetacatTimestampType(splitType);
case "timestamp_tz":
case "timestamptz":
case "timestampltz":
case "timestamp_ltz":
case "timestamp with local time zone":
case "timestamp with time zone":
return BaseType.TIMESTAMP_WITH_TIME_ZONE;
case "date":
return BaseType.DATE;
case "boolean":
return BaseType.BOOLEAN;
default:
log.info("Unhandled or unknown Snowflake type {}", splitType[0]);
return BaseType.UNKNOWN;
}
}
private void fixDataSizeIfIncorrect(final String[] splitType) {
//
// Workaround: Snowflake may report a missing or negative size for character and
// binary types; fall back to the default length instead of failing.
// TODO: Remove this workaround once the reported sizes are reliable.
//
if (splitType[1] == null || Integer.parseInt(splitType[1]) <= 0) {
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
}
}
/**
* {@inheritDoc}
*/
@Override
public String fromMetacatType(final Type type) {
switch (type.getTypeSignature().getBase()) {
case ARRAY:
throw new UnsupportedOperationException("Snowflake doesn't support array types");
case BIGINT:
return "NUMBER(38)";
case BOOLEAN:
return "BOOLEAN";
case CHAR:
if (!(type instanceof CharType)) {
throw new IllegalArgumentException("Expected CHAR type but was " + type.getClass().getName());
}
final CharType charType = (CharType) type;
return "CHAR(" + charType.getLength() + ")";
case DATE:
return "DATE";
case DECIMAL:
if (!(type instanceof DecimalType)) {
throw new IllegalArgumentException("Expected decimal type but was " + type.getClass().getName());
}
final DecimalType decimalType = (DecimalType) type;
return "DECIMAL(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
case DOUBLE:
case FLOAT:
return "DOUBLE PRECISION";
case INT:
return "INT";
case INTERVAL_DAY_TO_SECOND:
throw new UnsupportedOperationException("Snowflake doesn't support interval types");
case INTERVAL_YEAR_TO_MONTH:
throw new UnsupportedOperationException("Snowflake doesn't support interval types");
case JSON:
throw new UnsupportedOperationException("Snowflake doesn't support JSON types");
case MAP:
throw new UnsupportedOperationException("Snowflake doesn't support MAP types");
case ROW:
throw new UnsupportedOperationException("Snowflake doesn't support ROW types");
case SMALLINT:
return "SMALLINT";
case STRING:
return "STRING";
case TIME:
case TIME_WITH_TIME_ZONE:
return "TIME";
case TIMESTAMP:
return "TIMESTAMP";
case TIMESTAMP_WITH_TIME_ZONE:
return "TIMESTAMPTZ";
case TINYINT:
return "SMALLINT";
case UNKNOWN:
throw new IllegalArgumentException("Can't map an unknown type");
case VARBINARY:
return "VARBINARY";
case VARCHAR:
if (!(type instanceof VarcharType)) {
throw new IllegalArgumentException("Expected varchar type but was " + type.getClass().getName());
}
final VarcharType varcharType = (VarcharType) type;
return "VARCHAR(" + varcharType.getLength() + ")";
default:
throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
}
}
}
| 2,061 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorPlugin.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Snowflake Connector Plugin.
*
* @author amajumdar
* @since 1.2.0
*/
public class SnowflakeConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "snowflake";
private static final SnowflakeTypeConverter TYPE_CONVERTER = new SnowflakeTypeConverter();
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new SnowflakeConnectorFactory(connectorContext.getCatalogName(),
connectorContext.getCatalogShardName(), connectorContext.getConfiguration());
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
}
| 2,062 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorTableService.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorTableService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorUtils;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
/**
* Snowflake table service implementation.
*
* @author amajumdar
* @since 1.2.0
*/
@Slf4j
public class SnowflakeConnectorTableService extends JdbcConnectorTableService {
private static final String COL_CREATED = "CREATED";
private static final String COL_LAST_ALTERED = "LAST_ALTERED";
private static final String SQL_GET_AUDIT_INFO
= "select created, last_altered from information_schema.tables"
+ " where table_schema=? and table_name=?";
private static final String JDBC_UNDERSCORE = "_";
private static final String JDBC_ESCAPE_UNDERSCORE = "\\_";
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public SnowflakeConnectorTableService(
final DataSource dataSource,
final JdbcTypeConverter typeConverter,
final JdbcExceptionMapper exceptionMapper
) {
super(dataSource, typeConverter, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
super.delete(context, getSnowflakeName(name));
}
/**
* Returns the Snowflake representation of the name, which is always upper case.
*
* @param name qualified name
* @return qualified name
*/
private QualifiedName getSnowflakeName(final QualifiedName name) {
return name.cloneWithUpperCase();
}
/**
* Returns a normalized string that escapes JDBC special characters like "_".
*
* @param input object name.
* @return the normalized string.
*/
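// Example: "part_table" becomes "part\\_table" so that DatabaseMetaData lookups
// match the underscore literally instead of treating it as a single-character
// JDBC search wildcard.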
private static String getJdbcNormalizedSnowflakeName(final String input) {
if (!StringUtils.isBlank(input) && input.contains(JDBC_UNDERSCORE)) {
return StringUtils.replace(input, JDBC_UNDERSCORE, JDBC_ESCAPE_UNDERSCORE);
}
return input;
}
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
return super.get(context, getSnowflakeName(name));
}
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
return super.list(context, getSnowflakeName(name), prefix, sort, pageable);
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> listNames(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable) {
return super.listNames(context, name.cloneWithUpperCase(), prefix, sort, pageable);
}
/**
* {@inheritDoc}
*/
@Override
public void rename(@Nonnull final ConnectorRequestContext context,
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName) {
super.rename(context, getSnowflakeName(oldName), getSnowflakeName(newName));
}
@Override
protected Connection getConnection(@Nonnull @NonNull final String schema) throws SQLException {
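// Note: the schema argument is not used here; Snowflake scoping is by catalog,
// so the session schema is set to the connection's catalog instead.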
final Connection connection = this.dataSource.getConnection();
connection.setSchema(connection.getCatalog());
return connection;
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
boolean result = false;
final QualifiedName sName = getSnowflakeName(name);
try (Connection connection = this.dataSource.getConnection();
     ResultSet rs = getTables(connection, sName, sName, false)) {
    result = rs.next();
} catch (final SQLException se) {
throw this.exceptionMapper.toConnectorException(se, name);
}
return result;
}
@Override
protected ResultSet getColumns(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name
) throws SQLException {
try {
return connection.getMetaData().getColumns(
connection.getCatalog(),
getJdbcNormalizedSnowflakeName(name.getDatabaseName()),
getJdbcNormalizedSnowflakeName(name.getTableName()),
JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
);
} catch (SQLException e) {
throw this.exceptionMapper.toConnectorException(e, name);
}
}
/**
* {@inheritDoc}
*/
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
final QualifiedName tableName = getSnowflakeName(tableInfo.getName());
try (
PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)
) {
statement.setString(1, tableName.getDatabaseName());
statement.setString(2, tableName.getTableName());
try (ResultSet resultSet = statement.executeQuery()) {
if (resultSet.next()) {
final AuditInfo auditInfo =
AuditInfo.builder().createdDate(resultSet.getDate(COL_CREATED))
.lastModifiedDate(resultSet.getDate(COL_LAST_ALTERED)).build();
tableInfo.setAudit(auditInfo);
}
}
} catch (final Exception e) {
    log.info("Ignoring. Error getting the audit info for table {}", tableName, e);
}
}
/**
* {@inheritDoc}
*/
@Override
protected ResultSet getTables(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix
) throws SQLException {
return getTables(connection, name, prefix, true);
}
private ResultSet getTables(
@Nonnull @NonNull final Connection connection,
@Nonnull @NonNull final QualifiedName name,
@Nullable final QualifiedName prefix,
final boolean multiCharacterSearch
) throws SQLException {
final String schema = getJdbcNormalizedSnowflakeName(name.getDatabaseName());
final DatabaseMetaData metaData = connection.getMetaData();
return prefix == null || StringUtils.isEmpty(prefix.getTableName())
? metaData.getTables(connection.getCatalog(), schema, null, TABLE_TYPES)
: metaData
.getTables(
connection.getCatalog(),
schema,
multiCharacterSearch ? getJdbcNormalizedSnowflakeName(prefix.getTableName())
+ JdbcConnectorUtils.MULTI_CHARACTER_SEARCH
: getJdbcNormalizedSnowflakeName(prefix.getTableName()),
TABLE_TYPES
);
}
}
| 2,063 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/package-info.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes for the Snowflake Connector implementation.
*
* @author amajumdar
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.snowflake;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,064 |
0 |
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-snowflake/src/main/java/com/netflix/metacat/connector/snowflake/SnowflakeConnectorFactory.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.snowflake;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import java.util.Map;
/**
* Connector Factory for Snowflake.
*
* @author amajumdar
* @since 1.2.0
*/
class SnowflakeConnectorFactory extends DefaultConnectorFactory {
/**
* Constructor.
*
* @param name catalog name
* @param catalogShardName catalog shard name
* @param configuration catalog configuration
*/
SnowflakeConnectorFactory(
final String name,
final String catalogShardName,
final Map<String, String> configuration
) {
super(name, catalogShardName,
Lists.newArrayList(new SnowflakeConnectorModule(catalogShardName, configuration)));
}
}
| 2,065 |
0 |
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlServiceUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.DataSourceManager;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.FileSystems;
import java.util.Properties;
import java.util.Set;
/**
* MySqlServiceUtil.
*
* @author zhenl
* @since 1.1.0
*/
public final class MySqlServiceUtil {
private MySqlServiceUtil() {
}
/**
* Returns the list of string having the input ids.
*
* @param jdbcTemplate jdbc template
* @param sql query sql
* @param item identifier
* @return list of results
*/
public static Set<String> getValues(final JdbcTemplate jdbcTemplate,
final String sql,
final Object item) {
try {
return jdbcTemplate.query(sql, rs -> {
final Set<String> result = Sets.newHashSet();
while (rs.next()) {
result.add(rs.getString("value"));
}
return result;
}, item);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
}
}
/**
* load mysql data source.
*
* @param dataSourceManager data source manager to use
* @param configLocation usermetadata config location
* @throws Exception exception to throw
*/
public static void loadMySqlDataSource(final DataSourceManager dataSourceManager,
final String configLocation) throws Exception {
URL url = Thread.currentThread().getContextClassLoader().getResource(configLocation);
if (url == null) {
url = FileSystems.getDefault().getPath(configLocation).toUri().toURL();
}
final Properties connectionProperties = new Properties();
try (InputStream is = url.openStream()) {
connectionProperties.load(is);
} catch (Exception e) {
throw new Exception(String.format("Unable to read from user metadata config file %s", configLocation), e);
}
dataSourceManager.load(UserMetadataService.NAME_DATASOURCE, connectionProperties);
}
}
| 2,066 |
0 |
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlLookupService.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.google.common.base.Joiner;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.model.Lookup;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;
import org.springframework.transaction.annotation.Transactional;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.Set;
import java.util.stream.Collectors;
/**
* User metadata service impl using Mysql.
*/
@Slf4j
@SuppressFBWarnings
@Transactional("metadataTxManager")
public class MySqlLookupService implements LookupService {
private static final String SQL_GET_LOOKUP =
"select id, name, type, created_by createdBy, last_updated_by lastUpdatedBy, date_created dateCreated,"
+ " last_updated lastUpdated from lookup where name=?";
private static final String SQL_INSERT_LOOKUP =
"insert into lookup( name, version, type, created_by, last_updated_by, date_created, last_updated)"
+ " values (?,0,?,?,?,now(),now())";
private static final String SQL_INSERT_LOOKUP_VALUES =
"insert into lookup_values( lookup_id, values_string) values (?,?)";
private static final String SQL_DELETE_LOOKUP_VALUES =
"delete from lookup_values where lookup_id=? and values_string in (%s)";
private static final String SQL_GET_LOOKUP_VALUES =
"select values_string value from lookup_values where lookup_id=?";
private static final String SQL_GET_LOOKUP_VALUES_BY_NAME =
"select lv.values_string value from lookup l, lookup_values lv where l.id=lv.lookup_id and l.name=?";
private static final String STRING_TYPE = "string";
private final Config config;
private JdbcTemplate jdbcTemplate;
/**
* Constructor.
*
* @param config config
* @param jdbcTemplate jdbc template
*/
public MySqlLookupService(final Config config, final JdbcTemplate jdbcTemplate) {
this.config = config;
this.jdbcTemplate = jdbcTemplate;
}
/**
* Returns the lookup for the given <code>name</code>.
*
* @param name lookup name
* @return lookup
*/
@Override
@Transactional(readOnly = true)
public Lookup get(final String name) {
try {
return jdbcTemplate.queryForObject(
SQL_GET_LOOKUP,
new Object[]{name}, new int[]{Types.VARCHAR},
(rs, rowNum) -> {
final Lookup lookup = new Lookup();
lookup.setId(rs.getLong("id"));
lookup.setName(rs.getString("name"));
lookup.setType(rs.getString("type"));
lookup.setCreatedBy(rs.getString("createdBy"));
lookup.setLastUpdated(rs.getDate("lastUpdated"));
lookup.setLastUpdatedBy(rs.getString("lastUpdatedBy"));
lookup.setDateCreated(rs.getDate("dateCreated"));
lookup.setValues(getValues(rs.getLong("id")));
return lookup;
});
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
final String message = String.format("Failed to get the lookup for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Returns the value of the lookup name.
*
* @param name lookup name
* @return scalar lookup value
*/
@Override
@Transactional(readOnly = true)
public String getValue(final String name) {
String result = null;
final Set<String> values = getValues(name);
if (values != null && !values.isEmpty()) {
result = values.iterator().next();
}
return result;
}
/**
* Returns the list of values of the lookup name.
*
* @param lookupId lookup id
* @return list of lookup values
*/
@Override
@Transactional(readOnly = true)
public Set<String> getValues(final Long lookupId) {
try {
return MySqlServiceUtil.getValues(jdbcTemplate, SQL_GET_LOOKUP_VALUES, lookupId);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
} catch (Exception e) {
final String message = String.format("Failed to get the lookup values for id %s", lookupId);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Returns the list of values of the lookup name.
*
* @param name lookup name
* @return list of lookup values
*/
@Override
@Transactional(readOnly = true)
public Set<String> getValues(final String name) {
try {
return MySqlServiceUtil.getValues(jdbcTemplate, SQL_GET_LOOKUP_VALUES_BY_NAME, name);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
} catch (Exception e) {
final String message = String.format("Failed to get the lookup values for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Saves the lookup value.
*
* @param name lookup name
* @param values multiple values
* @return returns the lookup with the given name.
*/
@Override
public Lookup setValues(final String name, final Set<String> values) {
try {
final Lookup lookup = findOrCreateLookupByName(name);
final Set<String> inserts;
Set<String> deletes = Sets.newHashSet();
final Set<String> lookupValues = lookup.getValues();
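// Diff the desired values against what is stored: insert what is new and
// delete what is no longer present, leaving unchanged rows untouched.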
if (lookupValues == null || lookupValues.isEmpty()) {
inserts = values;
} else {
inserts = Sets.difference(values, lookupValues).immutableCopy();
deletes = Sets.difference(lookupValues, values).immutableCopy();
}
lookup.setValues(values);
if (!inserts.isEmpty()) {
insertLookupValues(lookup.getId(), inserts);
}
if (!deletes.isEmpty()) {
deleteLookupValues(lookup.getId(), deletes);
}
return lookup;
} catch (Exception e) {
final String message = String.format("Failed to set the lookup values for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
private void insertLookupValues(final Long id, final Set<String> inserts) {
jdbcTemplate.batchUpdate(SQL_INSERT_LOOKUP_VALUES, inserts.stream().map(insert -> new Object[]{id, insert})
.collect(Collectors.toList()), new int[]{Types.BIGINT, Types.VARCHAR});
}
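// The deleted values are quoted and inlined into the IN clause because a JDBC
// placeholder cannot bind a collection; the values originate from rows already
// stored for the lookup, not from raw user input.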
private void deleteLookupValues(final Long id, final Set<String> deletes) {
jdbcTemplate.update(
String.format(SQL_DELETE_LOOKUP_VALUES, "'" + Joiner.on("','").skipNulls().join(deletes) + "'"),
new SqlParameterValue(Types.BIGINT, id));
}
/**
* findOrCreateLookupByName.
*
* @param name name to find or create
* @return Look up object
* @throws SQLException sql exception
*/
private Lookup findOrCreateLookupByName(final String name) throws SQLException {
Lookup lookup = get(name);
if (lookup == null) {
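// Insert a new lookup row and capture its auto-generated id via the key holder.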
final KeyHolder holder = new GeneratedKeyHolder();
jdbcTemplate.update(connection -> {
final PreparedStatement ps = connection.prepareStatement(SQL_INSERT_LOOKUP,
Statement.RETURN_GENERATED_KEYS);
ps.setString(1, name);
ps.setString(2, STRING_TYPE);
ps.setString(3, config.getLookupServiceUserAdmin());
ps.setString(4, config.getLookupServiceUserAdmin());
return ps;
}, holder);
final Long lookupId = holder.getKey().longValue();
lookup = new Lookup();
lookup.setName(name);
lookup.setId(lookupId);
}
return lookup;
}
/**
* Saves the lookup value.
*
* @param name lookup name
* @param values multiple values
* @return returns the lookup with the given name.
*/
@Override
public Lookup addValues(final String name, final Set<String> values) {
try {
final Lookup lookup = findOrCreateLookupByName(name);
final Set<String> inserts;
final Set<String> lookupValues = lookup.getValues();
if (lookupValues == null || lookupValues.isEmpty()) {
inserts = values;
lookup.setValues(values);
} else {
inserts = Sets.difference(values, lookupValues);
}
if (!inserts.isEmpty()) {
insertLookupValues(lookup.getId(), inserts);
}
return lookup;
} catch (Exception e) {
final String message = String.format("Failed to set the lookup values for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Saves the lookup value.
*
* @param name lookup name
* @param value lookup value
* @return returns the lookup with the given name.
*/
@Override
public Lookup setValue(final String name, final String value) {
return setValues(name, Sets.newHashSet(value));
}
}
| 2,067 |
0 |
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlTagService.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.model.Lookup;
import com.netflix.metacat.common.server.model.TagItem;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nullable;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Tag service implementation.
*
* @author amajumdar
* @author zhenl
*/
@Slf4j
@SuppressFBWarnings
@Transactional("metadataTxManager")
public class MySqlTagService implements TagService {
/**
* Lookup name for tag.
*/
private static final String LOOKUP_NAME_TAG = "tag";
private static final String NAME_TAGS = "tags";
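// In the queries below each "1=?" acts as an on/off switch: binding 1 makes the
// disjunction true and disables the predicate that follows; binding 0 enforces it.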
private static final String QUERY_LIST =
"select distinct i.name from tag_item i, tag_item_tags t where i.id=t.tag_item_id"
+ " and (1=? or t.tags_string in (%s) ) and (1=? or i.name like ?) and (1=? or i.name rlike ?)";
private static final String QUERY_SEARCH =
"select distinct i.name from tag_item i, tag_item_tags t where i.id=t.tag_item_id"
+ " and (1=? or t.tags_string %s ) and (1=? or i.name like ?)";
private static final String SQL_GET_TAG_ITEM =
"select id, name, created_by createdBy, last_updated_by lastUpdatedBy, date_created dateCreated,"
+ " last_updated lastUpdated from tag_item where name=?";
private static final String SQL_INSERT_TAG_ITEM =
"insert into tag_item( name, version, created_by, last_updated_by, date_created, last_updated)"
+ " values (?,0,?,?,now(),now())";
private static final String SQL_UPDATE_TAG_ITEM =
"update tag_item set name=?, last_updated=now() where name=?";
private static final String SQL_INSERT_TAG_ITEM_TAGS =
"insert into tag_item_tags( tag_item_id, tags_string) values (?,?)";
private static final String SQL_DELETE_TAG_ITEM =
"delete from tag_item where name=?";
private static final String SQL_DELETE_TAG_ITEM_TAGS_BY_NAME =
"delete from tag_item_tags where tag_item_id=(select id from tag_item where name=?)";
private static final String SQL_DELETE_TAG_ITEM_TAGS_BY_NAME_TAGS =
"delete from tag_item_tags where tag_item_id=(select id from tag_item where name=?) and tags_string in (%s)";
private static final String SQL_DELETE_TAG_ITEM_TAGS =
"delete from tag_item_tags where tag_item_id=(?) and tags_string in (%s)";
private static final String SQL_GET_TAG_ITEM_TAGS =
"select tags_string value from tag_item_tags where tag_item_id=?";
private static final String EMPTY_CLAUSE = "''";
// private static final String SQL_GET_LOOKUP_VALUES_BY_NAME =
// "select lv.tags_string value from tag_item l, tag_item_tags lv where l.id=lv.tag_item_id and l.name=?";
private static final int MAX_TAGS_LIST_COUNT = 16;
private final Config config;
private final LookupService lookupService;
private final MetacatJson metacatJson;
private final UserMetadataService userMetadataService;
private JdbcTemplate jdbcTemplate;
/**
* Constructor.
*
* @param config config
* @param jdbcTemplate JDBC template
* @param lookupService lookup service
* @param metacatJson json util
* @param userMetadataService user metadata service
*/
public MySqlTagService(
final Config config,
final JdbcTemplate jdbcTemplate,
final LookupService lookupService,
final MetacatJson metacatJson,
final UserMetadataService userMetadataService
) {
this.config = Preconditions.checkNotNull(config, "config is required");
this.jdbcTemplate = jdbcTemplate;
this.lookupService = Preconditions.checkNotNull(lookupService, "lookupService is required");
this.metacatJson = Preconditions.checkNotNull(metacatJson, "metacatJson is required");
this.userMetadataService = Preconditions.checkNotNull(userMetadataService, "userMetadataService is required");
}
private Lookup addTags(final Set<String> tags) {
try {
return lookupService.addValues(LOOKUP_NAME_TAG, tags);
} catch (Exception e) {
final String message = String.format("Failed adding the tags %s", tags);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* Get the tag item.
*
* @param name name
* @return tag item
*/
public TagItem get(final QualifiedName name) {
return get(name.toString());
}
/**
* Returns the TagItem for the given <code>name</code>.
*
* @param name tag name
* @return TagItem
*/
@Transactional(readOnly = true)
public TagItem get(final String name) {
try {
return jdbcTemplate.queryForObject(
SQL_GET_TAG_ITEM,
new Object[]{name}, new int[]{Types.VARCHAR},
(rs, rowNum) -> {
final TagItem tagItem = new TagItem();
tagItem.setId(rs.getLong("id"));
tagItem.setName(rs.getString("name"));
tagItem.setCreatedBy(rs.getString("createdBy"));
tagItem.setLastUpdated(rs.getDate("lastUpdated"));
tagItem.setLastUpdatedBy(rs.getString("lastUpdatedBy"));
tagItem.setDateCreated(rs.getDate("dateCreated"));
tagItem.setValues(getValues(rs.getLong("id")));
return tagItem;
});
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
final String message = String.format("Failed to get the tag for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
    /**
     * Returns the set of tags associated with the given tag item id.
     *
     * @param tagItemId tag item id
     * @return set of tags
     */
private Set<String> getValues(final Long tagItemId) {
try {
return MySqlServiceUtil.getValues(jdbcTemplate, SQL_GET_TAG_ITEM_TAGS, tagItemId);
} catch (EmptyResultDataAccessException e) {
return Sets.newHashSet();
} catch (Exception e) {
final String message = String.format("Failed to get the tags for id %s", tagItemId);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
    /**
     * Finds the tag item with the given name, creating it if it does not exist.
     *
     * @param name name to find or create
     * @return tag item
     * @throws SQLException sql exception
     */
private TagItem findOrCreateTagItemByName(final String name) throws SQLException {
TagItem result = get(name);
if (result == null) {
final KeyHolder holder = new GeneratedKeyHolder();
jdbcTemplate.update(connection -> {
final PreparedStatement ps = connection.prepareStatement(SQL_INSERT_TAG_ITEM,
Statement.RETURN_GENERATED_KEYS);
ps.setString(1, name);
ps.setString(2, config.getTagServiceUserAdmin());
ps.setString(3, config.getTagServiceUserAdmin());
return ps;
}, holder);
final Long id = holder.getKey().longValue();
result = new TagItem();
result.setName(name);
result.setId(id);
}
return result;
}
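    // Illustrative sketch (not part of the original source): the method above is
    // idempotent from the caller's point of view. The qualified name below is
    // hypothetical.
    //
    //   final TagItem first = findOrCreateTagItemByName("prodhive/analytics/events");  // INSERT path
    //   final TagItem second = findOrCreateTagItemByName("prodhive/analytics/events"); // SELECT path
    //   // first.getId() and second.getId() refer to the same tag_item row.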
@Override
public void renameTableTags(final QualifiedName name, final String newTableName) {
try {
final QualifiedName newName = QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(),
newTableName);
if (get(newName) != null) {
delete(newName, false /*don't delete existing definition metadata with the new name*/);
}
jdbcTemplate.update(SQL_UPDATE_TAG_ITEM, new String[]{newName.toString(), name.toString()},
new int[]{Types.VARCHAR, Types.VARCHAR});
} catch (Exception e) {
final String message = String.format("Failed to rename item name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void delete(final QualifiedName name, final boolean updateUserMetadata) {
try {
jdbcTemplate
.update(SQL_DELETE_TAG_ITEM_TAGS_BY_NAME, new SqlParameterValue(Types.VARCHAR, name.toString()));
jdbcTemplate.update(SQL_DELETE_TAG_ITEM, new SqlParameterValue(Types.VARCHAR, name.toString()));
if (updateUserMetadata) {
// Set the tags in user metadata
final Map<String, Set<String>> data = Maps.newHashMap();
data.put(NAME_TAGS, Sets.newHashSet());
userMetadataService
.saveDefinitionMetadata(name, "admin", Optional.of(metacatJson.toJsonObject(data)),
true);
}
} catch (Exception e) {
final String message = String.format("Failed to delete all tags for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
    /**
     * Removes the given tags from the named resource.
     *
     * @param name qualified name
     * @param tags tags to remove
     * @param updateUserMetadata flag to also update the tags stored in user metadata
     */
public void remove(final QualifiedName name, final Set<String> tags, final boolean updateUserMetadata) {
try {
final TagItem tagItem = get(name);
if (tagItem == null || tagItem.getValues().isEmpty()) {
                log.info("No tags or tag items found for table {}", name);
return;
}
final List<SqlParameterValue> params = Lists.newArrayList();
params.add(new SqlParameterValue(Types.VARCHAR, name.toString()));
jdbcTemplate.update(String.format(SQL_DELETE_TAG_ITEM_TAGS_BY_NAME_TAGS,
buildParametrizedInClause(tags, params, params.size())),
params.toArray());
if (updateUserMetadata) {
tagItem.getValues().removeAll(tags);
final Map<String, Set<String>> data = Maps.newHashMap();
data.put(NAME_TAGS, tagItem.getValues());
userMetadataService
.saveDefinitionMetadata(name, "admin", Optional.of(metacatJson.toJsonObject(data)),
true);
}
} catch (Exception e) {
final String message = String.format("Failed to remove tags for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
    /**
     * Returns the set of all tag values.
     *
     * @return set of tag names
     */
@Override
@Transactional(readOnly = true)
public Set<String> getTags() {
return lookupService.getValues(LOOKUP_NAME_TAG);
}
    /**
     * Returns the list of <code>QualifiedName</code> of items that are tagged with at least one of the
     * given <code>includeTags</code> and with none of the given <code>excludeTags</code>.
     *
     * @param includeTags include only items that contain any of these tags
     * @param excludeTags exclude items that contain any of these tags
     * @param sourceName catalog/source name
     * @param databaseName database name
     * @param tableName table name
     * @param type metacat data category
     * @return list of qualified names of the items
     */
@Override
@Transactional(readOnly = true)
public List<QualifiedName> list(
@Nullable final Set<String> includeTags,
@Nullable final Set<String> excludeTags,
@Nullable final String sourceName,
@Nullable final String databaseName,
@Nullable final String tableName,
@Nullable final QualifiedName.Type type
) {
Set<String> includedNames = Sets.newHashSet();
final Set<String> excludedNames = Sets.newHashSet();
final String wildCardName =
QualifiedName.qualifiedNameToWildCardQueryString(sourceName, databaseName, tableName);
final Set<String> localIncludes = includeTags != null ? includeTags : Sets.newHashSet();
validateRequestTagCount(localIncludes);
try {
includedNames.addAll(queryTaggedItems(wildCardName, type, localIncludes));
if (excludeTags != null && !excludeTags.isEmpty()) {
excludedNames.addAll(queryTaggedItems(wildCardName, type, excludeTags));
}
} catch (Exception e) {
final String message = String.format("Failed getting the list of qualified names for tags %s", includeTags);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
if (excludeTags != null && !excludeTags.isEmpty()) {
includedNames = Sets.difference(includedNames, excludedNames);
}
return includedNames.stream().map(s -> QualifiedName.fromString(s, false)).collect(Collectors.toList());
}
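    // Illustrative sketch (assumption, not from the original source): finding all
    // tables in a hypothetical catalog/database that carry the tag "pii" but not
    // the tag "deprecated".
    //
    //   final List<QualifiedName> piiTables = tagService.list(
    //       Sets.newHashSet("pii"),        // includeTags: match items with any of these
    //       Sets.newHashSet("deprecated"), // excludeTags: drop items with any of these
    //       "prodhive",                    // sourceName (hypothetical)
    //       "analytics",                   // databaseName (hypothetical)
    //       null,                          // tableName: any
    //       QualifiedName.Type.TABLE);     // restrict matches to table names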
/**
     * Returns the list of <code>QualifiedName</code> of items that have tags beginning with the given tag text.
*
* @param tag partial text of a tag
* @param sourceName source/catalog name
* @param databaseName database name
* @param tableName table name
* @return list of qualified names of the items
*/
@Override
@Transactional(readOnly = true)
public List<QualifiedName> search(
@Nullable final String tag,
@Nullable final String sourceName,
@Nullable final String databaseName,
@Nullable final String tableName
) {
final Set<String> result = Sets.newHashSet();
try {
final String wildCardName =
QualifiedName.qualifiedNameToWildCardQueryString(sourceName, databaseName, tableName);
//Includes
final String query = String.format(QUERY_SEARCH, "like ?");
final Object[] params = {tag == null ? 1 : 0, tag + "%", wildCardName == null ? 1 : 0, wildCardName};
result.addAll(jdbcTemplate.query(query, params,
new int[]{Types.INTEGER, Types.VARCHAR, Types.INTEGER, Types.VARCHAR},
(rs, rowNum) -> rs.getString("name")));
} catch (Exception e) {
final String message = String.format("Failed getting the list of qualified names for tag %s", tag);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result.stream().map(QualifiedName::fromString).collect(Collectors.toList());
}
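    // Illustrative sketch (assumption): search performs a prefix match, so a call
    // like the one below returns items whose tags start with "pii" (e.g. "pii",
    // "pii-restricted"). Catalog and database names are hypothetical.
    //
    //   final List<QualifiedName> matches =
    //       tagService.search("pii", "prodhive", "analytics", null);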
    /**
     * Tags the given resource with the given <code>tags</code>, replacing any previously stored tags.
     *
     * @param name resource name
     * @param tags set of tags to apply
     * @param updateUserMetadata flag to also store the tags in user metadata
     * @return the complete set of tags associated with the resource
     */
@Override
public Set<String> setTags(final QualifiedName name, final Set<String> tags,
final boolean updateUserMetadata) {
addTags(tags);
try {
final TagItem tagItem = findOrCreateTagItemByName(name.toString());
final Set<String> inserts;
Set<String> deletes = Sets.newHashSet();
Set<String> values = tagItem.getValues();
if (values == null || values.isEmpty()) {
inserts = tags;
} else {
inserts = Sets.difference(tags, values).immutableCopy();
deletes = Sets.difference(values, tags).immutableCopy();
}
values = tags;
if (!inserts.isEmpty()) {
insertTagItemTags(tagItem.getId(), inserts);
}
if (!deletes.isEmpty()) {
removeTagItemTags(tagItem.getId(), deletes);
}
if (updateUserMetadata) {
// Set the tags in user metadata
final Map<String, Set<String>> data = Maps.newHashMap();
data.put(NAME_TAGS, values);
userMetadataService
.saveDefinitionMetadata(name, "admin", Optional.of(metacatJson.toJsonObject(data)),
true);
}
} catch (Exception e) {
            final String message = String.format("Failed to set tags for name %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return tags;
}
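    // Illustrative sketch (not from the original source): setTags computes a diff
    // against the currently stored tags. For example, if the existing tags are
    // {"a", "b"} and the new set is {"b", "c"}, then:
    //
    //   inserts = {"b", "c"} - {"a", "b"} = {"c"}   // rows added to tag_item_tags
    //   deletes = {"a", "b"} - {"b", "c"} = {"a"}   // rows removed from tag_item_tags
    //
    // leaving exactly {"b", "c"} associated with the item.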
private void removeTagItemTags(final Long id, final Set<String> tags) {
final List<SqlParameterValue> params = Lists.newArrayList();
params.add(new SqlParameterValue(Types.BIGINT, id));
jdbcTemplate
.update(String.format(SQL_DELETE_TAG_ITEM_TAGS, buildParametrizedInClause(
tags,
params,
params.size()
)), params.toArray());
}
private void insertTagItemTags(final Long id, final Set<String> tags) {
jdbcTemplate.batchUpdate(SQL_INSERT_TAG_ITEM_TAGS, tags.stream().map(tag -> new Object[]{id, tag})
.collect(Collectors.toList()), new int[]{Types.BIGINT, Types.VARCHAR});
}
    /**
     * Removes the tags from the given resource.
     *
     * @param name qualified name
     * @param deleteAll if true, will delete all tags associated with the given table
     * @param tags list of tags to be removed for the given table
     * @param updateUserMetadata flag to also update the tags stored in user metadata
     */
@Override
public void removeTags(final QualifiedName name, final Boolean deleteAll,
final Set<String> tags, final boolean updateUserMetadata) {
if (deleteAll != null && deleteAll) {
delete(name, updateUserMetadata);
} else {
remove(name, tags, updateUserMetadata);
}
}
private List<String> queryTaggedItems(final String name,
final QualifiedName.Type type,
final Set<String> tags) {
final List<SqlParameterValue> sqlParams = Lists.newArrayList();
sqlParams.add(new SqlParameterValue(Types.INTEGER, tags.size() == 0 ? 1 : 0));
final String query = String.format(QUERY_LIST,
buildParametrizedInClause(tags, sqlParams, sqlParams.size()));
sqlParams.addAll(Stream.of(
new SqlParameterValue(Types.INTEGER, name == null ? 1 : 0),
new SqlParameterValue(Types.VARCHAR, name),
new SqlParameterValue(Types.INTEGER, type == null ? 1 : 0),
new SqlParameterValue(Types.VARCHAR, type == null ? ".*" : type.getRegexValue())
).collect(Collectors.toList()));
return jdbcTemplate.query(query,
sqlParams.toArray(),
(rs, rowNum) -> rs.getString("name"));
}
private static String buildParametrizedInClause(final Set<String> tags,
final List<SqlParameterValue> params,
final int index) {
final String tagList = tags.stream().filter(StringUtils::isNotBlank)
.map(v -> "?").collect(Collectors.joining(", "));
params.addAll(index, tags.stream().filter(StringUtils::isNotBlank)
.map(p -> new SqlParameterValue(Types.VARCHAR, p))
.collect(Collectors.toList()));
return StringUtils.isBlank(tagList) ? EMPTY_CLAUSE : tagList;
}
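    // Illustrative sketch (not from the original source): for tags {"a", "b", "c"},
    // buildParametrizedInClause returns "?, ?, ?" and inserts three VARCHAR
    // SqlParameterValue entries into params at the given index, so a template like
    //
    //   "delete from tag_item_tags where tag_item_id=(?) and tags_string in (%s)"
    //
    // formats to "... and tags_string in (?, ?, ?)". For an empty or all-blank tag
    // set it returns "''", which matches no rows.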
private static void validateRequestTagCount(final Set<String> tags) {
final int totalTags = tags.size();
if (totalTags > MAX_TAGS_LIST_COUNT) {
throw new MetacatBadRequestException(String.format("Too many tags in request. Count %s", totalTags));
}
}
}
| 2,068 |
0 |
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MySqlUserMetadataConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptor;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptorImpl;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.DataSourceManager;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import javax.sql.DataSource;
/**
* MySql UserMetadata Config.
*
* @author zhenl
* @since 1.1.0
*/
@Configuration
@ConditionalOnProperty(value = "metacat.mysqlmetadataservice.enabled", havingValue = "true")
public class MySqlUserMetadataConfig {
/**
* business Metadata Manager.
* @return business Metadata Manager
*/
@Bean
@ConditionalOnMissingBean(MetadataInterceptor.class)
public MetadataInterceptor businessMetadataManager(
) {
return new MetadataInterceptorImpl();
}
/**
* User Metadata service.
*
* @param jdbcTemplate JDBC template
* @param config System config to use
* @param metacatJson Json Utilities to use
* @param metadataInterceptor business metadata manager
* @return User metadata service based on MySql
*/
@Bean
public UserMetadataService userMetadataService(
@Qualifier("metadataJdbcTemplate") final JdbcTemplate jdbcTemplate,
final Config config,
final MetacatJson metacatJson,
final MetadataInterceptor metadataInterceptor
) {
return new MysqlUserMetadataService(jdbcTemplate, metacatJson, config, metadataInterceptor);
}
/**
* Lookup service.
*
* @param jdbcTemplate JDBC template
* @param config System configuration to use
* @return Lookup service backed by MySQL
*/
@Bean
public LookupService lookupService(
@Qualifier("metadataJdbcTemplate") final JdbcTemplate jdbcTemplate,
final Config config) {
return new MySqlLookupService(config, jdbcTemplate);
}
/**
* The tag service to use.
*
* @param jdbcTemplate JDBC template
* @param config System config to use
* @param metacatJson Json Utilities to use
* @param lookupService Look up service implementation to use
* @param userMetadataService User metadata service implementation to use
* @return The tag service implementation backed by MySQL
*/
@Bean
public TagService tagService(
@Qualifier("metadataJdbcTemplate") final JdbcTemplate jdbcTemplate,
final Config config,
final MetacatJson metacatJson,
final LookupService lookupService,
final UserMetadataService userMetadataService
) {
return new MySqlTagService(config, jdbcTemplate, lookupService, metacatJson, userMetadataService);
}
/**
* mySql DataSource.
*
* @param dataSourceManager data source manager
* @param metacatProperties metacat properties
* @return data source
* @throws Exception exception
*/
@Bean
public DataSource metadataDataSource(final DataSourceManager dataSourceManager,
final MetacatProperties metacatProperties) throws Exception {
MySqlServiceUtil.loadMySqlDataSource(dataSourceManager,
metacatProperties.getUsermetadata().getConfig().getLocation());
return dataSourceManager.get(UserMetadataService.NAME_DATASOURCE);
}
/**
* mySql metadata Transaction Manager.
*
* @param mySqlDataSource metadata data source
* @return metadata transaction manager
*/
@Bean
public DataSourceTransactionManager metadataTxManager(
@Qualifier("metadataDataSource") final DataSource mySqlDataSource) {
return new DataSourceTransactionManager(mySqlDataSource);
}
/**
* mySql metadata JDBC template.
*
* @param mySqlDataSource metadata data source
* @param config System config to use
* @return metadata JDBC template
*/
@Bean
public JdbcTemplate metadataJdbcTemplate(
@Qualifier("metadataDataSource") final DataSource mySqlDataSource,
final Config config) {
final JdbcTemplate result = new JdbcTemplate(mySqlDataSource);
result.setQueryTimeout(config.getMetadataQueryTimeout());
return result;
}
}
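// Illustrative sketch (assumption, not part of the original source): enabling this
// configuration from Spring application properties. The first key is taken from the
// @ConditionalOnProperty annotation above; the user-metadata config location key and
// its value are hypothetical and depend on how MetacatProperties is bound.
//
//   metacat.mysqlmetadataservice.enabled=true
//   metacat.usermetadata.config.location=etc/catalog/usermetadata.properties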
| 2,069 |
0 |
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/MysqlUserMetadataService.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.metadata.mysql;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DefinitionMetadataDto;
import com.netflix.metacat.common.dto.HasDataMetadata;
import com.netflix.metacat.common.dto.HasDefinitionMetadata;
import com.netflix.metacat.common.dto.HasMetadata;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetadataException;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.BaseUserMetadataService;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.common.server.usermetadata.MetadataInterceptor;
import com.netflix.metacat.common.server.usermetadata.UserMetadataServiceException;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Data;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.jdbc.core.SqlParameterValue;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.sql.Types;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
/**
* User metadata service.
* <p>
 * Definition metadata (business metadata about the logical schema definition) is stored in two tables. Definition
 * metadata about partitions is stored in the 'partition_definition_metadata' table. Definition metadata about
 * catalogs, databases and tables is stored in the 'definition_metadata' table.
 * <p>
 * Data metadata (metadata about the data stored in the location referred to by the schema) is stored in the
 * 'data_metadata' table.
*/
@Slf4j
@SuppressFBWarnings
@Transactional("metadataTxManager")
public class MysqlUserMetadataService extends BaseUserMetadataService {
private static final String NAME_OWNER = "owner";
private static final String NAME_USERID = "userId";
private static final List<String> DEFINITION_METADATA_SORT_BY_COLUMNS = Arrays.asList(
"id", "date_created", "created_by", "last_updated_by", "name", "last_updated");
private static final List<String> VALID_SORT_ORDER = Arrays.asList("ASC", "DESC");
private final MetacatJson metacatJson;
private final Config config;
private JdbcTemplate jdbcTemplate;
private final MetadataInterceptor metadataInterceptor;
/**
* Constructor.
*
* @param jdbcTemplate jdbc template
* @param metacatJson json utility
* @param config config
* @param metadataInterceptor metadata interceptor
*/
public MysqlUserMetadataService(
final JdbcTemplate jdbcTemplate,
final MetacatJson metacatJson,
final Config config,
final MetadataInterceptor metadataInterceptor
) {
this.metacatJson = metacatJson;
this.config = config;
this.jdbcTemplate = jdbcTemplate;
this.metadataInterceptor = metadataInterceptor;
}
@Override
public void saveMetadata(final String userId, final HasMetadata holder, final boolean merge) {
super.saveMetadata(userId, holder, merge);
}
@Override
public void populateMetadata(final HasMetadata holder, final ObjectNode definitionMetadata,
final ObjectNode dataMetadata) {
super.populateMetadata(holder, definitionMetadata, dataMetadata);
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Optional<ObjectNode> getDefinitionMetadataWithInterceptor(
@Nonnull final QualifiedName name,
final GetMetadataInterceptorParameters getMetadataInterceptorParameters) {
//not applying interceptor
final Optional<ObjectNode> retData = getDefinitionMetadata(name);
retData.ifPresent(objectNode ->
this.metadataInterceptor.onRead(this, name, objectNode, getMetadataInterceptorParameters));
return retData;
}
@Override
public void softDeleteDataMetadata(
final String user,
@Nonnull final List<String> uris
) {
try {
final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
for (List<String> subUris : subLists) {
_softDeleteDataMetadata(user, subUris);
}
} catch (Exception e) {
final String message = String.format("Failed deleting the data metadata for %s", uris);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteDataMetadata(
@Nonnull final List<String> uris
) {
deleteDataMetadatasWithBatch(uris, true);
}
@Override
public void deleteDataMetadataDeletes(
@Nonnull final List<String> uris
) {
deleteDataMetadatasWithBatch(uris, false);
}
private void deleteDataMetadatasWithBatch(final List<String> uris, final boolean removeDataMetadata) {
try {
final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
for (List<String> subUris : subLists) {
_deleteDataMetadata(subUris, removeDataMetadata);
}
} catch (Exception e) {
final String message = String.format("Failed deleting the data metadata for %s", uris);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteDefinitionMetadata(
@Nonnull final List<QualifiedName> names
) {
try {
final List<List<QualifiedName>> subLists =
Lists.partition(names, config.getUserMetadataMaxInClauseItems());
for (List<QualifiedName> subNames : subLists) {
_deleteDefinitionMetadata(subNames);
}
} catch (Exception e) {
final String message = String.format("Failed deleting the definition metadata for %s", names);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteStaleDefinitionMetadata(
@NonNull final String qualifiedNamePattern,
@NonNull final Date lastUpdated) {
        // Lombok's @NonNull already generates null checks that throw at method
        // entry, so a manual null guard here would be unreachable.
try {
jdbcTemplate.update(SQL.DELETE_DEFINITION_METADATA_STALE, new Object[]{qualifiedNamePattern, lastUpdated},
new int[]{Types.VARCHAR, Types.TIMESTAMP});
} catch (Exception e) {
final String message = String.format("Failed to delete stale definition metadata for pattern %s",
qualifiedNamePattern);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void deleteMetadata(final String userId, final List<HasMetadata> holders) {
try {
final List<List<HasMetadata>> subLists =
Lists.partition(holders, config.getUserMetadataMaxInClauseItems());
for (List<HasMetadata> hasMetadatas : subLists) {
final List<QualifiedName> names = hasMetadatas.stream()
.filter(m -> m instanceof HasDefinitionMetadata)
.map(m -> ((HasDefinitionMetadata) m).getDefinitionName())
.collect(Collectors.toList());
if (!names.isEmpty()) {
_deleteDefinitionMetadata(names);
}
if (config.canSoftDeleteDataMetadata()) {
final List<String> uris = hasMetadatas.stream()
.filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
.map(m -> ((HasDataMetadata) m).getDataUri()).collect(Collectors.toList());
if (!uris.isEmpty()) {
_softDeleteDataMetadata(userId, uris);
}
}
}
} catch (Exception e) {
log.error("Failed deleting metadatas", e);
throw new UserMetadataServiceException("Failed deleting metadatas", e);
}
}
    /**
     * Deletes definition metadata for the given names.
     *
     * @param names names to delete
     */
@SuppressWarnings("checkstyle:methodname")
private void _deleteDefinitionMetadata(
@Nullable final List<QualifiedName> names
) {
if (names != null && !names.isEmpty()) {
final SqlParameterValue[] aNames = names.stream().filter(name -> !name.isPartitionDefinition())
.map(n -> new SqlParameterValue(Types.VARCHAR, n))
.toArray(SqlParameterValue[]::new);
final SqlParameterValue[] aPartitionNames = names.stream().filter(QualifiedName::isPartitionDefinition)
.map(n -> new SqlParameterValue(Types.VARCHAR, n))
.toArray(SqlParameterValue[]::new);
if (aNames.length > 0) {
final List<String> paramVariables = Arrays.stream(aNames).map(s -> "?").collect(Collectors.toList());
jdbcTemplate.update(
String.format(SQL.DELETE_DEFINITION_METADATA, Joiner.on(",").skipNulls().join(paramVariables)),
(Object[]) aNames);
}
if (aPartitionNames.length > 0) {
final List<String> paramVariables =
Arrays.stream(aPartitionNames).map(s -> "?").collect(Collectors.toList());
jdbcTemplate.update(
String.format(SQL.DELETE_PARTITION_DEFINITION_METADATA,
Joiner.on(",").skipNulls().join(paramVariables)), (Object[]) aPartitionNames);
}
}
}
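    // Illustrative sketch (not from the original source): for a mixed input such as
    // ["c/d/t", "c/d/t/dateint=20240101"], the method above routes "c/d/t" to the
    // definition_metadata delete and the partition name to the
    // partition_definition_metadata delete, producing SQL roughly like:
    //
    //   delete from definition_metadata where name in (?)
    //   delete from partition_definition_metadata where name in (?)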
    /**
     * Marks the data metadata for the given uris as soft-deleted.
     *
     * @param userId user id
     * @param uris uri list
     */
@SuppressWarnings("checkstyle:methodname")
private void _softDeleteDataMetadata(final String userId,
@Nullable final List<String> uris) {
if (uris != null && !uris.isEmpty()) {
final List<String> paramVariables = uris.stream().map(s -> "?").collect(Collectors.toList());
final String[] aUris = uris.toArray(new String[0]);
final String paramString = Joiner.on(",").skipNulls().join(paramVariables);
final List<Long> ids = jdbcTemplate
.query(String.format(SQL.GET_DATA_METADATA_IDS, paramString), aUris, (rs, rowNum) -> rs.getLong("id"));
if (!ids.isEmpty()) {
final List<String> idParamVariables = ids.stream().map(s -> "?").collect(Collectors.toList());
final Long[] aIds = ids.toArray(new Long[0]);
final String idParamString = Joiner.on(",").skipNulls().join(idParamVariables);
final List<Long> dupIds = jdbcTemplate
.query(String.format(SQL.GET_DATA_METADATA_DELETE_BY_IDS, idParamString), aIds,
(rs, rowNum) -> rs.getLong("id"));
if (!dupIds.isEmpty()) {
ids.removeAll(dupIds);
}
final List<Object[]> deleteDataMetadatas = Lists.newArrayList();
ids.forEach(id -> deleteDataMetadatas.add(new Object[]{id, userId}));
final int[] colTypes = {Types.BIGINT, Types.VARCHAR};
jdbcTemplate.batchUpdate(SQL.SOFT_DELETE_DATA_METADATA, deleteDataMetadatas, colTypes);
}
}
}
    /**
     * Deletes the soft-delete markers for the given uris and, optionally, the data metadata rows themselves.
     *
     * @param uris uri list
     * @param removeDataMetadata if true, also remove the data metadata rows, not just the delete markers
     */
@SuppressWarnings("checkstyle:methodname")
private void _deleteDataMetadata(
@Nullable final List<String> uris,
final boolean removeDataMetadata
) {
if (uris != null && !uris.isEmpty()) {
final List<String> paramVariables = uris.stream().map(s -> "?").collect(Collectors.toList());
final String[] aUris = uris.toArray(new String[0]);
final String paramString = Joiner.on(",").skipNulls().join(paramVariables);
final List<Long> ids = jdbcTemplate
.query(String.format(SQL.GET_DATA_METADATA_IDS, paramString), aUris, (rs, rowNum) -> rs.getLong("id"));
if (!ids.isEmpty()) {
final List<String> idParamVariables = ids.stream().map(s -> "?").collect(Collectors.toList());
final SqlParameterValue[] aIds = ids.stream().map(id -> new SqlParameterValue(Types.BIGINT, id))
.toArray(SqlParameterValue[]::new);
final String idParamString = Joiner.on(",").skipNulls().join(idParamVariables);
jdbcTemplate.update(String.format(SQL.DELETE_DATA_METADATA_DELETE, idParamString), (Object[]) aIds);
if (removeDataMetadata) {
jdbcTemplate.update(String.format(SQL.DELETE_DATA_METADATA, idParamString), (Object[]) aIds);
}
}
}
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Optional<ObjectNode> getDataMetadata(
@Nonnull final String uri) {
return getJsonForKey(SQL.GET_DATA_METADATA, uri);
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Map<String, ObjectNode> getDataMetadataMap(
@Nonnull final List<String> uris) {
final Map<String, ObjectNode> result = Maps.newHashMap();
if (!uris.isEmpty()) {
final List<List<String>> parts = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
parts.forEach(keys -> result.putAll(_getMetadataMap(keys, SQL.GET_DATA_METADATAS)));
}
return result;
}
@Nonnull
@Override
@Transactional(readOnly = true)
public Optional<ObjectNode> getDefinitionMetadata(
@Nonnull final QualifiedName name) {
final Optional<ObjectNode> retData = getJsonForKey(
name.isPartitionDefinition() ? SQL.GET_PARTITION_DEFINITION_METADATA : SQL.GET_DEFINITION_METADATA,
name.toString());
return retData;
}
@Override
@Transactional(readOnly = true)
public List<QualifiedName> getDescendantDefinitionNames(@Nonnull final QualifiedName name) {
final List<String> result;
try {
result = jdbcTemplate
.query(SQL.GET_DESCENDANT_DEFINITION_NAMES, new Object[]{name.toString() + "/%"},
new int[]{Types.VARCHAR},
(rs, rowNum) -> rs.getString("name"));
} catch (Exception e) {
final String message = String.format("Failed to get descendant names for %s", name);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result.stream().map(QualifiedName::fromString).collect(Collectors.toList());
}
@Override
@Transactional(readOnly = true)
public List<String> getDescendantDataUris(@Nonnull final String uri) {
final List<String> result;
try {
result = jdbcTemplate.query(SQL.GET_DESCENDANT_DATA_URIS, new Object[]{uri + "/%"},
new int[]{Types.VARCHAR},
(rs, rowNum) -> rs.getString("uri"));
} catch (Exception e) {
final String message = String.format("Failed to get descendant uris for %s", uri);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result;
}
//TODO: For partition metadata, add interceptor if needed
@Nonnull
@Override
@Transactional(readOnly = true)
public Map<String, ObjectNode> getDefinitionMetadataMap(
@Nonnull final List<QualifiedName> names) {
//
// names can contain partition names and non-partition names. Since definition metadata is stored in two tables,
// metadata needs to be retrieved from both the tables.
//
final List<QualifiedName> oNames = names.stream().filter(name -> !name.isPartitionDefinition()).collect(
Collectors.toList());
final List<QualifiedName> partitionNames = names.stream().filter(QualifiedName::isPartitionDefinition).collect(
Collectors.toList());
final Map<String, ObjectNode> result = Maps.newHashMap();
if (!oNames.isEmpty()) {
result.putAll(_getNonPartitionDefinitionMetadataMap(oNames));
}
if (!partitionNames.isEmpty()) {
result.putAll(_getPartitionDefinitionMetadata(partitionNames));
}
return result;
}
@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getNonPartitionDefinitionMetadataMap(final List<QualifiedName> names) {
final List<List<QualifiedName>> parts = Lists.partition(names, config.getUserMetadataMaxInClauseItems());
return parts.parallelStream()
.map(keys -> _getMetadataMap(keys, SQL.GET_DEFINITION_METADATAS))
.flatMap(it -> it.entrySet().stream())
.collect(Collectors.toConcurrentMap(it -> QualifiedName.fromString(it.getKey()).toString(),
Map.Entry::getValue));
}
@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getPartitionDefinitionMetadata(final List<QualifiedName> names) {
final List<List<QualifiedName>> parts = Lists.partition(names, config.getUserMetadataMaxInClauseItems());
return parts.parallelStream()
.map(keys -> _getMetadataMap(keys, SQL.GET_PARTITION_DEFINITION_METADATAS))
.flatMap(it -> it.entrySet().stream())
.collect(Collectors.toConcurrentMap(it -> QualifiedName.fromString(it.getKey()).toString(),
Map.Entry::getValue));
}
/**
* get Metadata Map.
*
* @param keys list of keys
* @param sql query string
* @return map of the metadata
*/
@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getMetadataMap(@Nullable final List<?> keys, final String sql) {
final Map<String, ObjectNode> result = Maps.newHashMap();
if (keys == null || keys.isEmpty()) {
return result;
}
final List<String> paramVariables = keys.stream().map(s -> "?").collect(Collectors.toList());
final SqlParameterValue[] aKeys = keys.stream().map(o -> new SqlParameterValue(Types.VARCHAR, o.toString()))
.toArray(SqlParameterValue[]::new);
        final String query = String.format(sql, Joiner.on(",").join(paramVariables));
try {
final ResultSetExtractor<Void> handler = resultSet -> {
while (resultSet.next()) {
final String json = resultSet.getString("data");
final String name = resultSet.getString("name");
if (json != null) {
try {
result.put(name, metacatJson.parseJsonObject(json));
} catch (MetacatJsonException e) {
log.error("Invalid json '{}' for name '{}'", json, name);
throw new UserMetadataServiceException(
String.format("Invalid json %s for name %s", json, name), e);
}
}
}
return null;
};
jdbcTemplate.query(query, aKeys, handler);
} catch (Exception e) {
final String message = String.format("Failed to get data for %s", keys);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
return result;
}
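    // Illustrative sketch (not from the original source): with three keys, the
    // format string SQL.GET_DEFINITION_METADATAS expands to
    //
    //   select name,data from definition_metadata where name in (?,?,?)
    //
    // and each returned 'data' column is parsed into an ObjectNode keyed by 'name'.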
/**
* get Json for key.
*
* @param query query string
* @param keyValue parameters
* @return result object node
*/
private Optional<ObjectNode> getJsonForKey(final String query, final String keyValue) {
try {
            final ResultSetExtractor<Optional<ObjectNode>> handler = rs -> {
final String json;
Optional<ObjectNode> result = Optional.empty();
while (rs.next()) {
final String key = rs.getString(1);
if (keyValue.equalsIgnoreCase(key)) {
json = rs.getString(2);
if (Strings.isNullOrEmpty(json)) {
return Optional.empty();
}
result = Optional.ofNullable(metacatJson.parseJsonObject(json));
break;
}
}
return result;
};
return jdbcTemplate.query(query, new String[]{keyValue}, new int[]{Types.VARCHAR}, handler);
} catch (MetacatJsonException e) {
final String message = String.format("Invalid json %s for name %s", e.getInputJson(), keyValue);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
} catch (Exception e) {
final String message = String.format("Failed to get data for %s", keyValue);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
/**
* executeUpdateForKey.
*
* @param query sql query string
* @param keyValues parameters
* @return number of updated rows
*/
private int executeUpdateForKey(final String query, final String... keyValues) {
try {
final SqlParameterValue[] values =
Arrays.stream(keyValues).map(keyValue -> new SqlParameterValue(Types.VARCHAR, keyValue))
.toArray(SqlParameterValue[]::new);
return jdbcTemplate.update(query, (Object[]) values);
} catch (Exception e) {
final String message = String.format("Failed to save data for %s", Arrays.toString(keyValues));
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
private void throwIfPartitionDefinitionMetadataDisabled() {
if (config.disablePartitionDefinitionMetadata()) {
throw new MetacatBadRequestException("Partition Definition metadata updates are disabled");
}
}
@Override
public void saveDataMetadata(
@Nonnull final String uri,
@Nonnull final String userId,
@Nonnull final Optional<ObjectNode> metadata, final boolean merge) {
final Optional<ObjectNode> existingData = getDataMetadata(uri);
final int count;
if (existingData.isPresent() && metadata.isPresent()) {
final ObjectNode merged = existingData.get();
if (merge) {
metacatJson.mergeIntoPrimary(merged, metadata.get());
}
count = executeUpdateForKey(SQL.UPDATE_DATA_METADATA, merged.toString(), userId, uri);
} else {
count = metadata.map(
jsonNodes -> executeUpdateForKey(SQL.INSERT_DATA_METADATA, jsonNodes.toString(), userId, userId, uri))
.orElse(1);
}
if (count != 1) {
            throw new IllegalStateException("Expected one row to be inserted or updated for " + uri);
}
}
@Override
public void saveDefinitionMetadata(
@Nonnull final QualifiedName name,
@Nonnull final String userId,
@Nonnull final Optional<ObjectNode> metadata, final boolean merge)
throws InvalidMetadataException {
final Optional<ObjectNode> existingData = getDefinitionMetadata(name);
final int count;
if (existingData.isPresent() && metadata.isPresent()) {
ObjectNode merged = existingData.get();
if (merge) {
metacatJson.mergeIntoPrimary(merged, metadata.get());
} else {
merged = metadata.get();
}
//apply interceptor to change the object node
this.metadataInterceptor.onWrite(this, name, merged);
String query;
if (name.isPartitionDefinition()) {
throwIfPartitionDefinitionMetadataDisabled();
query = SQL.UPDATE_PARTITION_DEFINITION_METADATA;
} else {
query = SQL.UPDATE_DEFINITION_METADATA;
}
count = executeUpdateForKey(
query,
merged.toString(),
userId,
name.toString());
} else {
//apply interceptor to change the object node
if (metadata.isPresent()) {
this.metadataInterceptor.onWrite(this, name, metadata.get());
}
String queryToExecute;
if (name.isPartitionDefinition()) {
throwIfPartitionDefinitionMetadataDisabled();
queryToExecute = SQL.INSERT_PARTITION_DEFINITION_METADATA;
} else {
queryToExecute = SQL.INSERT_DEFINITION_METADATA;
}
count = metadata.map(jsonNodes -> executeUpdateForKey(
queryToExecute,
jsonNodes.toString(),
userId,
userId,
name.toString()
)).orElse(1);
}
if (count != 1) {
            throw new IllegalStateException("Expected one row to be inserted or updated for " + name);
}
}
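    // Illustrative sketch (not from the original source): merge semantics for the
    // method above. With merge=true the incoming document is merged into the stored
    // one via MetacatJson.mergeIntoPrimary; with merge=false it replaces it. Exact
    // handling of nested arrays depends on that merge implementation.
    //
    //   stored:   {"owner":{"userId":"jdoe"},"tags":["a"]}
    //   incoming: {"tags":["b"]}
    //   merge=true  -> owner is preserved, "tags" is overwritten (sketch)
    //   merge=false -> {"tags":["b"]}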
@Override
public int renameDataMetadataKey(
@Nonnull final String oldUri,
@Nonnull final String newUri) {
return executeUpdateForKey(SQL.RENAME_DATA_METADATA, newUri, oldUri);
}
@Override
public int renameDefinitionMetadataKey(
@Nonnull final QualifiedName oldName,
@Nonnull final QualifiedName newName) {
_deleteDefinitionMetadata(Lists.newArrayList(newName));
return executeUpdateForKey(SQL.RENAME_DEFINITION_METADATA, newName.toString(), oldName.toString());
}
@Override
public void saveMetadata(final String user, final List<? extends HasMetadata> metadatas, final boolean merge) {
try {
@SuppressWarnings("unchecked") final List<List<HasMetadata>> subLists = Lists.partition(
(List<HasMetadata>) metadatas,
config.getUserMetadataMaxInClauseItems()
);
for (List<HasMetadata> hasMetadatas : subLists) {
final List<String> uris = Lists.newArrayList();
final List<QualifiedName> names = Lists.newArrayList();
// Get the names and uris
final List<HasDefinitionMetadata> definitionMetadatas = Lists.newArrayList();
final List<HasDataMetadata> dataMetadatas = Lists.newArrayList();
hasMetadatas.forEach(hasMetadata -> {
if (hasMetadata instanceof HasDefinitionMetadata) {
final HasDefinitionMetadata oDef = (HasDefinitionMetadata) hasMetadata;
names.add(oDef.getDefinitionName());
if (oDef.getDefinitionMetadata() != null) {
definitionMetadatas.add(oDef);
}
}
if (hasMetadata instanceof HasDataMetadata) {
final HasDataMetadata oData = (HasDataMetadata) hasMetadata;
if (oData.isDataExternal() && oData.getDataMetadata() != null
&& oData.getDataMetadata().size() > 0) {
uris.add(oData.getDataUri());
dataMetadatas.add(oData);
}
}
});
if (!definitionMetadatas.isEmpty() || !dataMetadatas.isEmpty()) {
// Get the existing metadata based on the names and uris
final Map<String, ObjectNode> definitionMap = getDefinitionMetadataMap(names);
final Map<String, ObjectNode> dataMap = getDataMetadataMap(uris);
// Curate the list of existing and new metadatas
final List<Object[]> insertDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> updateDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> insertPartitionDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> updatePartitionDefinitionMetadatas = Lists.newArrayList();
final List<Object[]> insertDataMetadatas = Lists.newArrayList();
final List<Object[]> updateDataMetadatas = Lists.newArrayList();
definitionMetadatas.forEach(oDef -> {
final QualifiedName qualifiedName = oDef.getDefinitionName();
if (qualifiedName != null && oDef.getDefinitionMetadata() != null
&& oDef.getDefinitionMetadata().size() != 0) {
final String name = qualifiedName.toString();
final ObjectNode oNode = definitionMap.get(name);
if (oNode == null) {
final Object[] o = new Object[]{
metacatJson.toJsonString(oDef.getDefinitionMetadata()), user, user, name, };
if (qualifiedName.isPartitionDefinition()) {
insertPartitionDefinitionMetadatas.add(o);
} else {
insertDefinitionMetadatas.add(o);
}
} else {
metacatJson.mergeIntoPrimary(oNode, oDef.getDefinitionMetadata());
final Object[] o = new Object[]{metacatJson.toJsonString(oNode), user, name};
if (qualifiedName.isPartitionDefinition()) {
updatePartitionDefinitionMetadatas.add(o);
} else {
updateDefinitionMetadatas.add(o);
}
}
}
});
dataMetadatas.forEach(oData -> {
final String uri = oData.getDataUri();
final ObjectNode oNode = dataMap.get(uri);
if (oData.getDataMetadata() != null && oData.getDataMetadata().size() != 0) {
if (oNode == null) {
insertDataMetadatas.add(
new Object[]{
metacatJson.toJsonString(oData.getDataMetadata()),
user,
user,
uri,
}
);
} else {
metacatJson.mergeIntoPrimary(oNode, oData.getDataMetadata());
updateDataMetadatas
.add(new Object[]{metacatJson.toJsonString(oNode), user, uri});
}
}
});
if (!insertDefinitionMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.INSERT_DEFINITION_METADATA, insertDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!updateDefinitionMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.UPDATE_DEFINITION_METADATA, updateDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!insertPartitionDefinitionMetadatas.isEmpty()) {
throwIfPartitionDefinitionMetadataDisabled();
jdbcTemplate.batchUpdate(SQL.INSERT_PARTITION_DEFINITION_METADATA,
insertPartitionDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!updatePartitionDefinitionMetadatas.isEmpty()) {
throwIfPartitionDefinitionMetadataDisabled();
jdbcTemplate.batchUpdate(SQL.UPDATE_PARTITION_DEFINITION_METADATA,
updatePartitionDefinitionMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!insertDataMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.INSERT_DATA_METADATA, insertDataMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
if (!updateDataMetadatas.isEmpty()) {
jdbcTemplate.batchUpdate(SQL.UPDATE_DATA_METADATA, updateDataMetadatas,
new int[]{Types.VARCHAR, Types.VARCHAR, Types.VARCHAR});
}
}
}
} catch (Exception e) {
log.error("Failed to save metadata", e);
throw new UserMetadataServiceException("Failed to save metadata", e);
}
}
@Override
@Transactional(readOnly = true)
public List<DefinitionMetadataDto> searchDefinitionMetadata(
@Nullable final Set<String> propertyNames,
@Nullable final String type,
@Nullable final String name,
@Nullable final HasMetadata holder,
@Nullable final String sortBy,
@Nullable final String sortOrder,
@Nullable final Integer offset,
@Nullable final Integer limit
) {
final List<DefinitionMetadataDto> result = Lists.newArrayList();
final SearchMetadataQuery queryObj = new SearchMetadataQuery(SQL.SEARCH_DEFINITION_METADATAS)
.buildSearchMetadataQuery(
propertyNames,
type,
name,
sortBy,
sortOrder,
offset,
limit
);
try {
// Handler for reading the result set
final ResultSetExtractor<Void> handler = rs -> {
while (rs.next()) {
final String definitionName = rs.getString("name");
final String data = rs.getString("data");
final DefinitionMetadataDto definitionMetadataDto = new DefinitionMetadataDto();
definitionMetadataDto.setName(QualifiedName.fromString(definitionName));
definitionMetadataDto.setDefinitionMetadata(metacatJson.parseJsonObject(data));
result.add(definitionMetadataDto);
}
return null;
};
jdbcTemplate.query(queryObj.getSearchQuery().toString(), queryObj.getSearchParamList().toArray(), handler);
} catch (Exception e) {
log.error("Failed to search definition data", e);
throw new UserMetadataServiceException("Failed to search definition data", e);
}
return result;
}
@Override
@Transactional(readOnly = true)
public List<QualifiedName> searchByOwners(final Set<String> owners) {
final List<QualifiedName> result = Lists.newArrayList();
final StringBuilder query = new StringBuilder(SQL.SEARCH_DEFINITION_METADATA_NAMES);
final List<SqlParameterValue> paramList = Lists.newArrayList();
query.append(" where 1=0");
owners.forEach(s -> {
query.append(" or data like ?");
paramList.add(new SqlParameterValue(Types.VARCHAR, "%\"userId\":\"" + s.trim() + "\"%"));
});
final SqlParameterValue[] params = new SqlParameterValue[paramList.size()];
try {
// Handler for reading the result set
final ResultSetExtractor<Void> handler = rs -> {
while (rs.next()) {
final String definitionName = rs.getString("name");
result.add(QualifiedName.fromString(definitionName, false));
}
return null;
};
jdbcTemplate.query(query.toString(), paramList.toArray(params), handler);
} catch (Exception e) {
log.error("Failed to search by owners", e);
throw new UserMetadataServiceException("Failed to search by owners", e);
}
return result;
}
@Override
@Transactional(readOnly = true)
public List<String> getDeletedDataMetadataUris(final Date deletedPriorTo, final Integer offset,
final Integer limit) {
try {
return jdbcTemplate.query(String.format(SQL.GET_DELETED_DATA_METADATA_URI, offset, limit),
new Object[]{deletedPriorTo}, new int[]{Types.TIMESTAMP}, (rs, rowNum) -> rs.getString("uri"));
} catch (Exception e) {
final String message =
String.format("Failed to get deleted data metadata uris deleted prior to %s", deletedPriorTo);
log.error(message, e);
throw new UserMetadataServiceException(message, e);
}
}
@Override
public void populateOwnerIfMissing(final HasDefinitionMetadata holder, final String owner) {
ObjectNode definitionMetadata = holder.getDefinitionMetadata();
if (definitionMetadata == null) {
definitionMetadata = metacatJson.emptyObjectNode();
holder.setDefinitionMetadata(definitionMetadata);
}
final ObjectNode ownerNode = definitionMetadata.with(NAME_OWNER);
final JsonNode userId = ownerNode.get(NAME_USERID);
if (userId == null || Strings.isNullOrEmpty(userId.textValue())) {
ownerNode.put(NAME_USERID, owner);
}
}
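    // Illustrative sketch (not from the original source): after calling
    // populateOwnerIfMissing(holder, "jdoe") on a holder with no owner set, the
    // definition metadata contains:
    //
    //   {"owner":{"userId":"jdoe"}}
    //
    // An existing non-empty owner.userId is left untouched.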
/**
     * Inner helper class for generating the search query for definition/business metadata.
*/
@Data
class SearchMetadataQuery {
private StringBuilder searchQuery;
private List<SqlParameterValue> searchParamList = Lists.newArrayList();
SearchMetadataQuery(final String querySQL) {
this.searchQuery = new StringBuilder(querySQL);
}
SearchMetadataQuery buildSearchMetadataQuery(@Nullable final Set<String> propertyNames,
@Nullable final String type,
@Nullable final String name,
@Nullable final String sortByStr,
@Nullable final String sortOrderStr,
@Nullable final Integer offset,
@Nullable final Integer limit) {
String sortBy = null;
if (StringUtils.isNotBlank(sortByStr)) {
sortBy = sortByStr.trim().toLowerCase();
if (!DEFINITION_METADATA_SORT_BY_COLUMNS.contains(sortBy)) {
throw new IllegalArgumentException(String.format("Invalid sortBy column %s", sortBy));
}
}
String sortOrder = null;
if (StringUtils.isNotBlank(sortOrderStr)) {
sortOrder = sortOrderStr.trim().toUpperCase();
if (!VALID_SORT_ORDER.contains(sortOrder)) {
throw new IllegalArgumentException("Invalid sort order. Expected ASC or DESC");
}
}
if (type != null) {
String typeRegex = null;
switch (type) {
case "catalog":
typeRegex = "^[^/]*$";
break;
case "database":
typeRegex = "^[^/]*/[^/]*$";
break;
case "table":
typeRegex = "^[^/]*/[^/]*/[^/]*$";
break;
case "partition":
typeRegex = "^[^/]*/[^/]*/[^/]*/.*$";
break;
default:
}
if (typeRegex != null) {
this.searchQuery.append(" and name rlike ?");
this.searchParamList.add(new SqlParameterValue(Types.VARCHAR, typeRegex));
}
}
if (propertyNames != null && !propertyNames.isEmpty()) {
propertyNames.forEach(propertyName -> {
this.searchQuery.append(" and data like ?");
searchParamList.add(new SqlParameterValue(Types.VARCHAR, "%\"" + propertyName + "\":%"));
});
}
if (!Strings.isNullOrEmpty(name)) {
this.searchQuery.append(" and name like ?");
this.searchParamList.add(new SqlParameterValue(Types.VARCHAR, name));
}
if (!Strings.isNullOrEmpty(sortBy)) {
this.searchQuery.append(" order by ").append(sortBy);
if (!Strings.isNullOrEmpty(sortOrder)) {
this.searchQuery.append(" ").append(sortOrder);
}
}
if (limit != null) {
this.searchQuery.append(" limit ");
if (offset != null) {
this.searchQuery.append(offset).append(",");
}
this.searchQuery.append(limit);
}
return this;
}
}
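    // Illustrative sketch (assumption, not from the original source): a search for
    // table-level definition metadata containing a hypothetical "lifetime" property,
    // sorted by last_updated descending with paging, builds SQL roughly like:
    //
    //   select name,data from definition_metadata where 1=1
    //     and name rlike ?   -- '^[^/]*/[^/]*/[^/]*$'  (type = "table")
    //     and data like ?    -- '%"lifetime":%'
    //   order by last_updated DESC limit 0,25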
protected static class SQL {
static final String SOFT_DELETE_DATA_METADATA =
"insert into data_metadata_delete(id, created_by,date_created) values (?,?, now())";
static final String GET_DATA_METADATA_IDS =
"select id from data_metadata where uri in (%s)";
static final String GET_DATA_METADATA_DELETE_BY_IDS =
"select id from data_metadata_delete where id in (%s)";
static final String DELETE_DATA_METADATA_DELETE =
"delete from data_metadata_delete where id in (%s)";
static final String DELETE_DATA_METADATA =
"delete from data_metadata where id in (%s)";
static final String DELETE_DEFINITION_METADATA =
"delete from definition_metadata where name in (%s)";
static final String DELETE_DEFINITION_METADATA_STALE =
"delete from definition_metadata where name like ? and last_updated < ?";
static final String DELETE_PARTITION_DEFINITION_METADATA =
"delete from partition_definition_metadata where name in (%s)";
static final String GET_DATA_METADATA =
"select uri name, data from data_metadata where uri=?";
static final String GET_DELETED_DATA_METADATA_URI =
"select uri from data_metadata_delete dmd join data_metadata dm on dmd.id=dm.id"
+ " where dmd.date_created < ? limit %d,%d";
static final String GET_DESCENDANT_DATA_URIS =
"select uri from data_metadata where uri like ?";
static final String GET_DESCENDANT_DEFINITION_NAMES =
"select name from partition_definition_metadata where name like ?";
static final String GET_DATA_METADATAS =
"select uri name,data from data_metadata where uri in (%s)";
static final String GET_DEFINITION_METADATA =
"select name, data from definition_metadata where name=?";
static final String GET_PARTITION_DEFINITION_METADATA =
"select name, data from partition_definition_metadata where name=?";
static final String GET_DEFINITION_METADATAS =
"select name,data from definition_metadata where name in (%s)";
static final String GET_PARTITION_DEFINITION_METADATAS =
"select name,data from partition_definition_metadata where name in (%s)";
static final String SEARCH_DEFINITION_METADATAS =
"select name,data from definition_metadata where 1=1";
static final String SEARCH_DEFINITION_METADATA_NAMES =
"select name from definition_metadata";
static final String INSERT_DATA_METADATA = "insert into data_metadata "
+ "(data, created_by, last_updated_by, date_created, last_updated, version, uri) values "
+ "(?, ?, ?, now(), now(), 0, ?)";
static final String INSERT_DEFINITION_METADATA = "insert into definition_metadata "
+ "(data, created_by, last_updated_by, date_created, last_updated, version, name) values "
+ "(?, ?, ?, now(), now(), 0, ?)";
static final String INSERT_PARTITION_DEFINITION_METADATA = "insert into partition_definition_metadata "
+ "(data, created_by, last_updated_by, date_created, last_updated, version, name) values "
+ "(?, ?, ?, now(), now(), 0, ?)";
static final String RENAME_DATA_METADATA = "update data_metadata set uri=? where uri=?";
static final String RENAME_DEFINITION_METADATA = "update definition_metadata set name=? where name=?";
static final String UPDATE_DATA_METADATA =
"update data_metadata set data=?, last_updated=now(), last_updated_by=? where uri=?";
static final String UPDATE_DEFINITION_METADATA =
"update definition_metadata set data=?, last_updated=now(), last_updated_by=? where name=?";
static final String UPDATE_PARTITION_DEFINITION_METADATA =
"update partition_definition_metadata set data=?, last_updated=now(), last_updated_by=? where name=?";
}
}
| 2,070 |
0 |
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata
|
Create_ds/metacat/metacat-metadata-mysql/src/main/java/com/netflix/metacat/metadata/mysql/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package includes user metadata service classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.metadata.mysql;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,071 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import java.util.Map;
/**
* Connector Factory for Redshift.
*
* @author tgianos
* @since 1.0.0
*/
class RedshiftConnectorFactory extends DefaultConnectorFactory {
/**
* Constructor.
*
* @param name catalog name
* @param catalogShardName catalog shard name
* @param configuration catalog configuration
*/
RedshiftConnectorFactory(
final String name,
final String catalogShardName,
final Map<String, String> configuration
) {
super(name, catalogShardName, Lists.newArrayList(new RedshiftConnectorModule(catalogShardName, configuration)));
}
}
| 2,072 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
* Redshift Connector Plugin.
*
* @author tgianos
* @since 1.0.0
*/
public class RedshiftConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "redshift";
private static final RedshiftTypeConverter TYPE_CONVERTER = new RedshiftTypeConverter();
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return new RedshiftConnectorFactory(connectorContext.getCatalogName(),
connectorContext.getCatalogShardName(), connectorContext.getConfiguration());
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return TYPE_CONVERTER;
}
}
| 2,073 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.extern.slf4j.Slf4j;
/**
* Type converter for Redshift.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
public class RedshiftTypeConverter extends JdbcTypeConverter {
static final int DEFAULT_CHARACTER_LENGTH = 256;
private static final String DEFAULT_CHARACTER_LENGTH_STRING = Integer.toString(DEFAULT_CHARACTER_LENGTH);
/**
* {@inheritDoc}
*
* @see <a href="http://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html">Redshift Types</a>
* @see <a href="http://docs.aws.amazon.com/redshift/latest/dg/c_unsupported-postgresql-datatypes.html">
* Unsupported PostgreSQL Types
* </a>
*/
@Override
public Type toMetacatType(final String type) {
// See: https://www.postgresql.org/docs/current/static/datatype.html
final String lowerType = type.toLowerCase();
// Split up the possible type: TYPE[(size, magnitude)] EXTRA
final String[] splitType = this.splitType(lowerType);
switch (splitType[0]) {
case "smallint":
case "int2":
return BaseType.SMALLINT;
case "int":
case "integer":
case "int4":
return BaseType.INT;
case "int8":
case "bigint":
case "oid":
return BaseType.BIGINT;
case "decimal":
case "numeric":
return this.toMetacatDecimalType(splitType);
case "real":
case "float4":
return BaseType.FLOAT;
case "double precision":
case "float8":
case "float":
return BaseType.DOUBLE;
case "character varying":
case "varchar":
case "nvarchar":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatVarcharType(splitType);
case "text":
case "name":
                // TEXT is effectively an alias for VARCHAR(256) in Redshift.
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
return this.toMetacatVarcharType(splitType);
case "character":
case "char":
case "nchar":
fixDataSizeIfIncorrect(splitType);
return this.toMetacatCharType(splitType);
case "bpchar":
// bpchar defaults to fixed length of 256 characters
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
return this.toMetacatCharType(splitType);
case "timestamp":
return this.toMetacatTimestampType(splitType);
case "timestampz":
return BaseType.TIMESTAMP_WITH_TIME_ZONE;
case "date":
return BaseType.DATE;
case "boolean":
case "bool":
return BaseType.BOOLEAN;
default:
// see: http://docs.aws.amazon.com/redshift/latest/dg/c_unsupported-postgresql-datatypes.html
log.info("Unhandled or unknown Redshift type {}", splitType[0]);
return BaseType.UNKNOWN;
}
}
private void fixDataSizeIfIncorrect(final String[] splitType) {
        //
        // Hack: the driver can report a missing or negative size for character types;
        // fall back to the default length instead of failing.
        // TODO: Remove this hack once the reported sizes can be trusted.
        //
if (splitType[1] == null || Integer.parseInt(splitType[1]) <= 0) {
splitType[1] = DEFAULT_CHARACTER_LENGTH_STRING;
}
}
/**
* {@inheritDoc}
*/
@Override
public String fromMetacatType(final Type type) {
switch (type.getTypeSignature().getBase()) {
case ARRAY:
throw new UnsupportedOperationException("Redshift doesn't support array types");
case BIGINT:
return "BIGINT";
case BOOLEAN:
return "BOOLEAN";
case CHAR:
if (!(type instanceof CharType)) {
throw new IllegalArgumentException("Expected CHAR type but was " + type.getClass().getName());
}
final CharType charType = (CharType) type;
return "CHAR(" + charType.getLength() + ")";
case DATE:
return "DATE";
case DECIMAL:
if (!(type instanceof DecimalType)) {
throw new IllegalArgumentException("Expected decimal type but was " + type.getClass().getName());
}
final DecimalType decimalType = (DecimalType) type;
return "DECIMAL(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
case DOUBLE:
case FLOAT:
return "DOUBLE PRECISION";
case INT:
return "INT";
case INTERVAL_DAY_TO_SECOND:
throw new UnsupportedOperationException("Redshift doesn't support interval types");
case INTERVAL_YEAR_TO_MONTH:
throw new UnsupportedOperationException("Redshift doesn't support interval types");
case JSON:
throw new UnsupportedOperationException("Redshift doesn't support JSON types");
case MAP:
throw new UnsupportedOperationException("Redshift doesn't support MAP types");
case ROW:
throw new UnsupportedOperationException("Redshift doesn't support ROW types");
case SMALLINT:
return "SMALLINT";
case STRING:
throw new UnsupportedOperationException("Redshift doesn't support STRING types");
case TIME:
case TIME_WITH_TIME_ZONE:
throw new UnsupportedOperationException("Redshift doesn't support TIME types");
case TIMESTAMP:
return "TIMESTAMP";
case TIMESTAMP_WITH_TIME_ZONE:
return "TIMESTAMPZ";
case TINYINT:
// NOTE: There is no tiny int type in Redshift so using slightly larger SMALLINT
return "SMALLINT";
case UNKNOWN:
throw new IllegalArgumentException("Can't map an unknown type");
case VARBINARY:
throw new UnsupportedOperationException("Redshift doesn't support VARBINARY types");
case VARCHAR:
if (!(type instanceof VarcharType)) {
throw new IllegalArgumentException("Expected varchar type but was " + type.getClass().getName());
}
final VarcharType varcharType = (VarcharType) type;
// NOTE: PostgreSQL lets you store up to 1GB in a varchar field which is about the same as TEXT
return "VARCHAR(" + varcharType.getLength() + ")";
default:
throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
}
}
}
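As a quick illustration of the round trip the converter performs (a sketch; the expected outputs follow directly from the switch cases above):
import com.netflix.metacat.common.type.Type;
public final class RedshiftTypeConverterDemo {
    public static void main(final String[] args) {
        final RedshiftTypeConverter converter = new RedshiftTypeConverter();
        // "varchar(255)" keeps its declared size; "text" falls back to the 256-character default.
        final Type varchar = converter.toMetacatType("varchar(255)");
        final Type text = converter.toMetacatType("text");
        System.out.println(converter.fromMetacatType(varchar)); // VARCHAR(255)
        System.out.println(converter.fromMetacatType(text));    // VARCHAR(256)
    }
}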
| 2,074 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import java.sql.SQLException;
/**
* Exception mapper for Redshift SQLExceptions.
*
* @author tgianos
* @author zhenl
* @see SQLException
* @see ConnectorException
* @see <a href="https://www.postgresql.org/docs/current/static/errcodes-appendix.html">PostgreSQL Ref</a>
* @since 1.0.0
*/
public class RedshiftExceptionMapper implements JdbcExceptionMapper {
/**
* {@inheritDoc}
*/
@Override
public ConnectorException toConnectorException(
final SQLException se,
final QualifiedName name
) {
        // TODO: In the absence of documentation stating otherwise, this is a copy of the PostgreSQL
        //       implementation; revisit if Redshift ever publishes its own SQLSTATE codes.
final String sqlState = se.getSQLState();
if (sqlState == null) {
throw new ConnectorException(se.getMessage(), se);
}
switch (sqlState) {
case "42P04": //database already exists
return new DatabaseAlreadyExistsException(name, se);
case "42P07": //table already exists
return new TableAlreadyExistsException(name, se);
case "3D000":
case "3F000": //database does not exist
return new DatabaseNotFoundException(name, se);
case "42P01": //table doesn't exist
return new TableNotFoundException(name, se);
default:
return new ConnectorException(se.getMessage(), se);
}
}
}
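A small sketch of the mapping in action, using a SQLSTATE from the PostgreSQL appendix referenced above:
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import java.sql.SQLException;
public final class RedshiftExceptionMapperDemo {
    public static void main(final String[] args) {
        final RedshiftExceptionMapper mapper = new RedshiftExceptionMapper();
        // SQLSTATE 42P01 signals a missing relation, so the result is a TableNotFoundException.
        final SQLException driverError = new SQLException("relation \"foo\" does not exist", "42P01");
        final ConnectorException mapped =
            mapper.toConnectorException(driverError, QualifiedName.ofTable("redshift", "db", "foo"));
        System.out.println(mapped.getClass().getSimpleName()); // TableNotFoundException
    }
}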
| 2,075 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftConnectorTableService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.google.inject.Inject;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorTableService;
import javax.sql.DataSource;
/**
* Redshift table service implementation.
*
* @author tgianos
* @since 1.0.8
*/
public class RedshiftConnectorTableService extends JdbcConnectorTableService {
/**
* Constructor.
*
* @param dataSource the datasource to use to connect to the database
* @param typeConverter The type converter to use from the SQL type to Metacat canonical type
* @param exceptionMapper The exception mapper to use
*/
@Inject
public RedshiftConnectorTableService(
final DataSource dataSource,
final JdbcTypeConverter typeConverter,
final JdbcExceptionMapper exceptionMapper
) {
super(dataSource, typeConverter, exceptionMapper);
}
/**
* {@inheritDoc}
*/
@Override
protected String getRenameTableSql(
final QualifiedName oldName,
final String finalOldTableName,
final String finalNewTableName
) {
return "ALTER TABLE "
+ oldName.getDatabaseName()
+ "."
+ finalOldTableName
+ " RENAME TO "
+ finalNewTableName;
}
/**
* {@inheritDoc}
*/
@Override
protected String getDropTableSql(final QualifiedName name, final String finalTableName) {
return "DROP TABLE " + name.getCatalogName() + "." + name.getDatabaseName() + "." + finalTableName;
}
}
| 2,076 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes for the Redshift Connector implementation.
*
* @author tgianos
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.redshift;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,077 |
0 |
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-redshift/src/main/java/com/netflix/metacat/connector/redshift/RedshiftConnectorModule.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.redshift;
import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorDatabaseService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorPartitionService;
import javax.sql.DataSource;
import java.util.Map;
/**
* Guice module for the Redshift Connector.
*
* @author tgianos
* @since 1.0.0
*/
public class RedshiftConnectorModule extends AbstractModule {
private final String catalogShardName;
private final Map<String, String> configuration;
/**
* Constructor.
*
* @param catalogShardName catalog shard name
* @param configuration connector configuration
*/
RedshiftConnectorModule(
final String catalogShardName,
final Map<String, String> configuration
) {
this.catalogShardName = catalogShardName;
this.configuration = configuration;
}
/**
* {@inheritDoc}
*/
@Override
protected void configure() {
this.bind(DataSource.class).toInstance(DataSourceManager.get()
.load(this.catalogShardName, this.configuration).get(this.catalogShardName));
this.bind(JdbcTypeConverter.class).to(RedshiftTypeConverter.class).in(Scopes.SINGLETON);
this.bind(JdbcExceptionMapper.class).to(RedshiftExceptionMapper.class).in(Scopes.SINGLETON);
this.bind(ConnectorDatabaseService.class)
.to(ConnectorUtils.getDatabaseServiceClass(this.configuration, JdbcConnectorDatabaseService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorTableService.class)
.to(ConnectorUtils.getTableServiceClass(this.configuration, RedshiftConnectorTableService.class))
.in(Scopes.SINGLETON);
this.bind(ConnectorPartitionService.class)
.to(ConnectorUtils.getPartitionServiceClass(this.configuration, JdbcConnectorPartitionService.class))
.in(Scopes.SINGLETON);
}
}
| 2,078 |
0 |
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig/PigConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.pig;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.connector.pig.converters.PigTypeConverter;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
 * Pig connector plugin.
*/
public class PigConnectorPlugin implements ConnectorPlugin {
/**
* Type of the connector.
*/
public static final String CONNECTOR_TYPE = "pig";
private static final PigTypeConverter PIG_TYPE_CONVERTER = new PigTypeConverter();
@Override
public String getType() {
return CONNECTOR_TYPE;
}
@Override
public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
return null;
}
@Override
public ConnectorTypeConverter getTypeConverter() {
return PIG_TYPE_CONVERTER;
}
}
| 2,079 |
0 |
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Pig connector classes.
*/
package com.netflix.metacat.connector.pig;
| 2,080 |
0 |
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig
|
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig/converters/PigTypeMapping.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.pig.converters;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import lombok.Getter;
import org.apache.pig.data.DataType;
import java.util.Map;
/**
* Pig type mapping.
*/
public class PigTypeMapping {
@Getter
private static final Map<Type, Byte> CANONICAL_TO_PIG = new ImmutableMap.Builder<Type, Byte>()
.put(VarbinaryType.VARBINARY, Byte.valueOf(DataType.BYTEARRAY))
.put(BaseType.BOOLEAN, Byte.valueOf(DataType.BOOLEAN))
.put(BaseType.INT, Byte.valueOf(DataType.INTEGER))
.put(BaseType.SMALLINT, Byte.valueOf(DataType.INTEGER))
.put(BaseType.TINYINT, Byte.valueOf(DataType.INTEGER))
.put(BaseType.BIGINT, Byte.valueOf(DataType.LONG))
.put(BaseType.FLOAT, Byte.valueOf(DataType.FLOAT))
.put(BaseType.DOUBLE, Byte.valueOf(DataType.DOUBLE))
.put(BaseType.TIMESTAMP, Byte.valueOf(DataType.DATETIME))
.put(BaseType.TIMESTAMP_WITH_TIME_ZONE, Byte.valueOf(DataType.DATETIME))
.put(BaseType.DATE, Byte.valueOf(DataType.DATETIME))
.put(BaseType.TIME, Byte.valueOf(DataType.DATETIME))
.put(BaseType.STRING, Byte.valueOf(DataType.CHARARRAY))
.put(BaseType.UNKNOWN, Byte.valueOf(DataType.UNKNOWN))
.build();
@Getter
private static final Map<Byte, Type> PIG_TO_CANONICAL = new ImmutableMap.Builder<Byte, Type>()
.put(Byte.valueOf(DataType.BOOLEAN), BaseType.BOOLEAN)
.put(Byte.valueOf(DataType.UNKNOWN), BaseType.UNKNOWN)
.put(Byte.valueOf(DataType.BYTE), VarbinaryType.VARBINARY)
.put(Byte.valueOf(DataType.BYTEARRAY), VarbinaryType.VARBINARY)
.put(Byte.valueOf(DataType.INTEGER), BaseType.INT)
.put(Byte.valueOf(DataType.LONG), BaseType.BIGINT)
.put(Byte.valueOf(DataType.BIGINTEGER), BaseType.BIGINT)
.put(Byte.valueOf(DataType.FLOAT), BaseType.FLOAT)
.put(Byte.valueOf(DataType.DOUBLE), BaseType.DOUBLE)
.put(Byte.valueOf(DataType.BIGDECIMAL), BaseType.DOUBLE)
.put(Byte.valueOf(DataType.DATETIME), BaseType.TIMESTAMP)
.put(Byte.valueOf(DataType.CHARARRAY), BaseType.STRING)
.put(Byte.valueOf(DataType.BIGCHARARRAY), BaseType.STRING)
.build();
}
| 2,081 |
0 |
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig
|
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig/converters/PigTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.pig.converters;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.type.ArrayType;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.MapType;
import com.netflix.metacat.common.type.RowType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeUtils;
import com.netflix.metacat.common.type.VarcharType;
import lombok.NonNull;
import org.apache.pig.backend.executionengine.ExecutionEngine;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.LocalExecType;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.newplan.logical.Util;
import org.apache.pig.newplan.logical.relational.LogicalSchema;
import org.apache.pig.parser.QueryParserDriver;
import javax.annotation.Nonnull;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
/**
* Class to convert pig to canonical type and vice versa.
*/
public class PigTypeConverter implements ConnectorTypeConverter {
private static final String NAME_ARRAY_ELEMENT = "array_element";
private static final PigContext PIG_CONTEXT = new PigContext(new LocalExecType() {
private static final long serialVersionUID = -54152102366768171L;
@Override
public ExecutionEngine getExecutionEngine(final PigContext pigContext) {
return null;
}
}, new Properties());
/**
* {@inheritDoc}.
*/
@Override
public Type toMetacatType(@Nonnull @NonNull final String pigType) {
try {
final LogicalSchema schema = new QueryParserDriver(PIG_CONTEXT,
"util", new HashMap<>()).parseSchema(pigType);
final LogicalSchema.LogicalFieldSchema field = schema.getField(0);
return toCanonicalType(field);
} catch (Exception e) {
throw new IllegalArgumentException(String.format("Invalid type signature: '%s'", pigType));
}
}
/**
* {@inheritDoc}.
*/
@Override
public String fromMetacatType(@Nonnull @NonNull final Type type) {
final Schema schema = new Schema(Util.translateFieldSchema(fromCanonicalTypeToPigSchema(null, type)));
final StringBuilder result = new StringBuilder();
try {
Schema.stringifySchema(result, schema, DataType.GENERIC_WRITABLECOMPARABLE, Integer.MIN_VALUE);
} catch (FrontendException e) {
throw new IllegalArgumentException(String.format("Invalid for Pig converter: '%s'", type.getDisplayName()));
}
return result.toString();
}
private LogicalSchema.LogicalFieldSchema fromCanonicalTypeToPigSchema(final String alias,
final Type canonicalType) {
if (PigTypeMapping.getCANONICAL_TO_PIG().containsKey(canonicalType)) {
return new LogicalSchema.LogicalFieldSchema(alias,
null, PigTypeMapping.getCANONICAL_TO_PIG().get(canonicalType));
} else if (canonicalType instanceof DecimalType) {
return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.DOUBLE);
} else if (canonicalType instanceof VarcharType || canonicalType instanceof CharType) {
return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.CHARARRAY);
} else if (canonicalType instanceof MapType) {
final MapType mapType = (MapType) canonicalType;
LogicalSchema schema = null;
if (((MapType) canonicalType).getValueType() != null
&& !BaseType.UNKNOWN.equals(mapType.getValueType())) {
schema = new LogicalSchema();
schema.addField(fromCanonicalTypeToPigSchema(null, mapType.getValueType()));
}
return new LogicalSchema.LogicalFieldSchema(alias, schema, DataType.MAP);
} else if (canonicalType instanceof ArrayType) {
final ArrayType arrayType = (ArrayType) canonicalType;
final LogicalSchema schema = new LogicalSchema();
Type elementType = arrayType.getElementType();
if (elementType != null) {
if (!(elementType instanceof RowType)) {
elementType = RowType.createRowType(
Lists.newArrayList(elementType),
ImmutableList.of(NAME_ARRAY_ELEMENT)
);
}
schema.addField(fromCanonicalTypeToPigSchema(null, elementType));
}
return new LogicalSchema.LogicalFieldSchema(alias, schema, DataType.BAG);
} else if (canonicalType instanceof RowType) {
final LogicalSchema schema = new LogicalSchema();
for (RowType.RowField rowField : ((RowType) canonicalType).getFields()) {
schema.addField(fromCanonicalTypeToPigSchema(
rowField.getName() != null ? rowField.getName() : alias,
rowField.getType()));
}
return new LogicalSchema.LogicalFieldSchema(alias, schema, DataType.TUPLE);
}
throw new IllegalArgumentException(String.format("Invalid for Pig converter: '%s'",
canonicalType.getDisplayName()));
}
private Type toCanonicalType(final LogicalSchema.LogicalFieldSchema field) {
if (PigTypeMapping.getPIG_TO_CANONICAL().containsKey(field.type)) {
return PigTypeMapping.getPIG_TO_CANONICAL().get(field.type);
}
switch (field.type) {
case DataType.MAP:
return toCanonicalMapType(field);
case DataType.BAG:
return toCanonicalArrayType(field);
case DataType.TUPLE:
return toCanonicalRowType(field);
default:
}
throw new IllegalArgumentException(String.format("Invalid for Pig converter: '%s'", field.toString()));
}
private Type toCanonicalRowType(final LogicalSchema.LogicalFieldSchema field) {
final List<Type> fieldTypes = Lists.newArrayList();
final List<String> fieldNames = Lists.newArrayList();
for (LogicalSchema.LogicalFieldSchema logicalFieldSchema : field.schema.getFields()) {
fieldTypes.add(toCanonicalType(logicalFieldSchema));
fieldNames.add(logicalFieldSchema.alias);
}
return RowType.createRowType(fieldTypes, fieldNames);
}
private Type toCanonicalArrayType(final LogicalSchema.LogicalFieldSchema field) {
final LogicalSchema.LogicalFieldSchema subField = field.schema.getField(0);
final Type elementType;
if (subField.type == DataType.TUPLE
&& !TypeUtils.isNullOrEmpty(subField.schema.getFields())
&& NAME_ARRAY_ELEMENT.equals(subField.schema.getFields().get(0).alias)) {
elementType = toCanonicalType(subField.schema.getFields().get(0));
} else {
elementType = toCanonicalType(subField);
}
return new ArrayType(elementType);
}
private Type toCanonicalMapType(final LogicalSchema.LogicalFieldSchema field) {
final Type key = BaseType.STRING;
Type value = BaseType.UNKNOWN;
if (null != field.schema && !TypeUtils.isNullOrEmpty(field.schema.getFields())) {
value = toCanonicalType(field.schema.getFields().get(0));
}
return new MapType(key, value);
}
}
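A brief sketch of both directions (the inputs are ordinary Pig field schema strings; the exact spacing of the stringified output may vary by Pig version):
import com.netflix.metacat.common.type.ArrayType;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.Type;
public final class PigTypeConverterDemo {
    public static void main(final String[] args) {
        final PigTypeConverter converter = new PigTypeConverter();
        final Type scalar = converter.toMetacatType("chararray"); // canonical STRING
        // A bag of one-field tuples maps to an array whose element is a row type.
        final Type bag = converter.toMetacatType("{(val: long)}");
        System.out.println(bag.getDisplayName());
        // Arrays of scalars are wrapped in a synthetic "array_element" tuple on the way out.
        System.out.println(converter.fromMetacatType(new ArrayType(BaseType.INT)));
    }
}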
| 2,082 |
0 |
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig
|
Create_ds/metacat/metacat-connector-pig/src/main/java/com/netflix/metacat/connector/pig/converters/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
 * Pig type converter classes.
 *
 * @author zhenl
*/
package com.netflix.metacat.connector.pig.converters;
| 2,083 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/MetacatApplication.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.ComponentScan.Filter;
import org.springframework.context.annotation.FilterType;
/**
* Spring Boot Metacat application entry point.
*
* @author tgianos
* @since 1.1.0
*/
@SpringBootApplication
@ComponentScan(excludeFilters = @Filter(type = FilterType.ASPECTJ, pattern = "com.netflix.metacat.connector..*"))
public class MetacatApplication {
/**
* Constructor.
*/
protected MetacatApplication() {
}
/**
* Main.
*
* @param args Program arguments
*/
public static void main(final String[] args) {
SpringApplication.run(MetacatApplication.class, args);
}
}
| 2,084 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Top level package for boot application files.
*
* @author tgianos
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,085 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/manager/CatalogManager.java
|
package com.netflix.metacat.main.manager;
import org.springframework.context.ApplicationContext;
/**
* Interface that defines how catalogs should be loaded.
*/
public interface CatalogManager {
/**
* Flag indicating whether all catalogs have been loaded.
*
* @return True if they've been loaded.
*/
boolean areCatalogsLoaded();
/**
* Load the catalogs for this applicationContext.
*
* @param applicationContext The application context.
* @throws Exception exception if we failed to load a catalog.
*/
void loadCatalogs(ApplicationContext applicationContext) throws Exception;
}
| 2,086 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/manager/PluginManager.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.manager;
import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.converter.TypeConverterFactory;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.ServiceLoader;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Plugin Manager. This loads the connector plugins using the ServiceLoader.
* Connector plugins need to be loaded before loading the catalogs.
*/
@Slf4j
public class PluginManager {
private final ConnectorManager connectorManager;
private final TypeConverterFactory typeConverterFactory;
private final AtomicBoolean pluginsLoaded = new AtomicBoolean();
private final AtomicBoolean pluginsLoading = new AtomicBoolean();
/**
* Constructor.
*
* @param connectorManager manager
* @param typeConverterFactory provider for type converters
*/
public PluginManager(
final ConnectorManager connectorManager,
final TypeConverterFactory typeConverterFactory
) {
this.connectorManager = connectorManager;
this.typeConverterFactory = typeConverterFactory;
}
/**
* Returns true if plugins are loaded.
*
* @return true if plugins are loaded.
*/
public boolean arePluginsLoaded() {
return pluginsLoaded.get();
}
/**
* Loads the plugins.
*
* @throws Exception error
*/
public void loadPlugins() throws Exception {
if (!this.pluginsLoading.compareAndSet(false, true)) {
return;
}
final ServiceLoader<ConnectorPlugin> serviceLoader =
ServiceLoader.load(ConnectorPlugin.class, this.getClass().getClassLoader());
final List<ConnectorPlugin> connectorPlugins = ImmutableList.copyOf(serviceLoader);
if (connectorPlugins.isEmpty()) {
log.warn("No service providers of type {}", ConnectorPlugin.class.getName());
}
for (ConnectorPlugin connectorPlugin : connectorPlugins) {
log.info("Installing {}", connectorPlugin.getClass().getName());
this.installPlugin(connectorPlugin);
log.info("-- Finished loading plugin {} --", connectorPlugin.getClass().getName());
}
this.pluginsLoaded.set(true);
}
/**
* Installs the plugins.
*
* @param connectorPlugin service plugin
*/
private void installPlugin(final ConnectorPlugin connectorPlugin) {
this.connectorManager.addPlugin(connectorPlugin);
this.typeConverterFactory.register(connectorPlugin.getType(), connectorPlugin.getTypeConverter());
}
}
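Because discovery goes through java.util.ServiceLoader, shipping a new connector only requires a provider-configuration file on the classpath. A hypothetical plugin jar would contain, at META-INF/services/com.netflix.metacat.common.server.connectors.ConnectorPlugin, a single line naming the implementation class:
com.example.metacat.connector.demo.DemoConnectorPlugin
PluginManager then registers the plugin with the connector manager, and its type converter with the factory, keyed by the value returned from getType().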
| 2,087 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/manager/DefaultCatalogManager.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.manager;
import com.google.common.base.Preconditions;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationContext;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Catalog manager. This loads the catalogs defined as .properties files under the location defined by config property
* <code>metacat.plugin.config.location</code>.
* Usually there is a one-to-one mapping between a catalog and a data store. We could also have data stores
* (mostly sharded) addressed by a single catalog name.
* If a data store is sharded and needs to be represented by a single catalog name, then we will have multiple catalog
 * property files, each referencing its physical data store and having the same <code>catalog.name</code>.
* In this case, <code>catalog.name</code> and <code>metacat.schema.whitelist</code> will be used to point to the right
* data store. A catalog with no <code>metacat.schema.whitelist</code> setting will be the default catalog representing
* all databases for the <code>catalog.name</code>.
*/
@Slf4j
public class DefaultCatalogManager implements CatalogManager {
private final ConnectorManager connectorManager;
private final File catalogConfigurationDir;
private final AtomicBoolean catalogsLoading = new AtomicBoolean();
private final AtomicBoolean catalogsLoaded = new AtomicBoolean();
private final Registry registry;
private final Config config;
/**
* Constructor.
*
* @param connectorManager manager
* @param config config
* @param registry registry of spectator
*/
public DefaultCatalogManager(
final ConnectorManager connectorManager,
final Config config,
final Registry registry
) {
this.connectorManager = connectorManager;
this.config = config;
this.catalogConfigurationDir = new File(config.getPluginConfigLocation());
this.registry = registry;
}
/**
* Returns true if all catalogs are loaded.
*
* @return true if all catalogs are loaded
*/
@Override
public boolean areCatalogsLoaded() {
return this.catalogsLoaded.get();
}
/**
* Loads catalogs.
*
* @param applicationContext spring application context
* @throws Exception error
*/
@Override
public void loadCatalogs(final ApplicationContext applicationContext) throws Exception {
if (!this.catalogsLoading.compareAndSet(false, true)) {
return;
}
for (final File file : MetacatUtils.listFiles(this.catalogConfigurationDir)) {
if (file.isFile() && file.getName().endsWith(".properties")) {
this.loadCatalog(file, applicationContext);
}
}
this.catalogsLoaded.set(true);
}
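    /**
     * Loads a single catalog from the given property file and registers its connector.
     *
     * @param file catalog property file
     * @param applicationContext spring application context
     * @throws Exception error loading the catalog
     */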
protected void loadCatalog(final File file, final ApplicationContext applicationContext) throws Exception {
log.info("-- Loading catalog {} --", file);
final Map<String, String> properties = new HashMap<>(MetacatUtils.loadProperties(file));
final String connectorType = properties.remove(MetacatCatalogConfig.Keys.CONNECTOR_NAME);
Preconditions.checkState(
connectorType != null,
"Catalog configuration %s does not contain connector.name",
file.getAbsoluteFile()
);
// Pass in the server application context to the connector context.
final ConnectorContext connectorContext = MetacatUtils.buildConnectorContext(file,
connectorType, config, registry, applicationContext, properties);
this.connectorManager.createConnection(connectorContext);
log.info("-- Added catalog {} shard {} using connector {} --",
connectorContext.getCatalogName(), connectorContext.getCatalogShardName(), connectorType);
}
}
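To make the convention above concrete, a hypothetical shard file under metacat.plugin.config.location could look like the following; connector.name, catalog.name, and metacat.schema.whitelist are the keys discussed above, and any remaining entries are passed through to the connector as its configuration:
connector.name=redshift
catalog.name=reporting
metacat.schema.whitelist=sales_db,finance_db
# remaining, connector-specific keys are handed to the connector factory unchanged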
| 2,088 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/manager/ConnectorManager.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.manager;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Sets;
import com.google.common.collect.Table;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorCatalogService;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorFactoryDecorator;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.exception.CatalogNotFoundException;
import com.netflix.metacat.common.server.connectors.model.CatalogInfo;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.common.Strings;
import javax.annotation.Nonnull;
import javax.annotation.PreDestroy;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Connector manager.
*/
@Slf4j
@RequiredArgsConstructor
public class ConnectorManager {
private static final String EMPTY_STRING = "";
// Map of connector plugins registered.
private final ConcurrentMap<String, ConnectorPlugin> plugins = new ConcurrentHashMap<>();
/**
* Table of catalog name, database name to catalog data stores. Usually there is a one-to-one mapping between the
 * catalog name and the data store. In this case the table will have the database name as null. If there are multiple
* catalogs addressed by the same <code>catalog.name</code> pointing to shards of a data store, then there will be
* multiple entries in this table. An entry will be identified using the catalog name and the database name.
*/
private final Table<String, String, CatalogHolder> catalogs = HashBasedTable.create();
private final Set<MetacatCatalogConfig> catalogConfigs = Sets.newHashSet();
private final Set<ConnectorDatabaseService> databaseServices = Sets.newHashSet();
private final Set<ConnectorTableService> tableServices = Sets.newHashSet();
private final Set<ConnectorPartitionService> partitionServices = Sets.newHashSet();
private final AtomicBoolean stopped = new AtomicBoolean();
private final Config config;
/**
* Stop.
*/
@PreDestroy
public void stop() {
if (stopped.getAndSet(true)) {
return;
}
catalogs.values().forEach(catalogHolder -> {
try {
catalogHolder.getConnectorFactory().stop();
} catch (Throwable t) {
log.error("Error shutting down connector: {}", catalogHolder.getConnectorFactory().getCatalogName(), t);
}
});
}
/**
* add Plugin.
*
* @param connectorPlugin connector plugin
*/
public void addPlugin(final ConnectorPlugin connectorPlugin) {
plugins.put(connectorPlugin.getType(), connectorPlugin);
}
/**
* Creates a connection for the given catalog.
*
* @param connectorContext metacat connector properties
*/
public synchronized void createConnection(final ConnectorContext connectorContext) {
Preconditions.checkState(!stopped.get(), "ConnectorManager is stopped");
final String connectorType = connectorContext.getConnectorType();
final String catalogName = connectorContext.getCatalogName();
final String catalogShardName = connectorContext.getCatalogShardName();
final ConnectorPlugin connectorPlugin = plugins.get(connectorType);
if (connectorPlugin != null) {
final MetacatCatalogConfig catalogConfig =
MetacatCatalogConfig.createFromMapAndRemoveProperties(connectorType, catalogName,
connectorContext.getConfiguration());
final List<String> databaseNames = catalogConfig.getSchemaWhitelist();
if (databaseNames.isEmpty()) {
Preconditions.checkState(!catalogs.contains(catalogName, EMPTY_STRING),
"A catalog with name %s already exists", catalogName);
} else {
databaseNames.forEach(databaseName -> {
Preconditions.checkState(!catalogs.contains(catalogName, databaseName),
"A catalog with name %s for database %s already exists", catalogName, databaseName);
});
}
catalogConfigs.add(catalogConfig);
final ConnectorFactory connectorFactory = new ConnectorFactoryDecorator(connectorPlugin, connectorContext);
try {
databaseServices.add(connectorFactory.getDatabaseService());
} catch (UnsupportedOperationException e) {
log.debug("Catalog {}:{} doesn't support getDatabaseService. Ignoring.", catalogName, catalogShardName);
}
try {
tableServices.add(connectorFactory.getTableService());
} catch (UnsupportedOperationException e) {
log.debug("Catalog {}:{} doesn't support getTableService. Ignoring.", catalogName, catalogShardName);
}
try {
partitionServices.add(connectorFactory.getPartitionService());
} catch (UnsupportedOperationException e) {
log.debug("Catalog {}:{} doesn't support getPartitionService. Ignoring.",
catalogName, catalogShardName);
}
final CatalogHolder catalogHolder = new CatalogHolder(catalogConfig, connectorFactory);
if (databaseNames.isEmpty()) {
catalogs.put(catalogName, EMPTY_STRING, catalogHolder);
} else {
databaseNames.forEach(databaseName -> {
catalogs.put(catalogName, databaseName, catalogHolder);
});
}
} else {
log.warn("No plugin for connector with type {}", connectorType);
}
}
/**
* Returns a set of catalog holders.
*
* @param catalogName catalog name
* @return catalog holders
*/
@Nonnull
private Set<CatalogHolder> getCatalogHolders(final String catalogName) {
final Map<String, CatalogHolder> result = getCatalogHoldersByDatabaseName(catalogName);
if (result.isEmpty()) {
throw new CatalogNotFoundException(catalogName);
} else {
return Sets.newHashSet(result.values());
}
}
private Map<String, CatalogHolder> getCatalogHoldersByDatabaseName(final String catalogName) {
Map<String, CatalogHolder> result = catalogs.row(catalogName);
if (result.isEmpty()) {
final String proxyCatalogName = getConnectorNameFromCatalogName(catalogName);
if (!Strings.isNullOrEmpty(proxyCatalogName)) {
result = catalogs.row(proxyCatalogName);
}
}
return result;
}
/**
* This method should be called only for a proxy catalog. A proxy catalog is a connector catalog that acts as a
* proxy to another service that contains the actual list of catalogs. The convention of the naming is such that
     * the connector name is prefixed to the catalog names. Ex: For a catalog configuration named 'cde', the
     * catalogs under it will be prefixed with 'cde_'.
*
* @param catalogName catalog name
* @return connector name
*/
private String getConnectorNameFromCatalogName(final String catalogName) {
String result = null;
final Iterator<String> splits = Splitter.on("_").limit(2).split(catalogName).iterator();
if (splits.hasNext()) {
result = splits.next();
}
return result;
}
/**
* Returns the catalog holder.
*
* @param name name
* @return catalog holder
*/
@Nonnull
private CatalogHolder getCatalogHolder(final QualifiedName name) {
final String catalogName = name.getCatalogName();
final String databaseName = name.isDatabaseDefinition() ? name.getDatabaseName() : EMPTY_STRING;
final Map<String, CatalogHolder> catalogHolders = getCatalogHoldersByDatabaseName(catalogName);
final CatalogHolder result = catalogHolders.containsKey(databaseName)
? catalogHolders.get(databaseName) : catalogHolders.get(EMPTY_STRING);
if (result == null) {
throw new CatalogNotFoundException(catalogName);
}
return result;
}
/**
* Returns the catalog config based on the qualified name that may or may not have the database name.
* If database name is not present, then the default catalog is returned if there are multiple catalogs with
* the same catalog name.
*
* @param name name
* @return catalog config
*/
@Nonnull
public MetacatCatalogConfig getCatalogConfig(final QualifiedName name) {
return getCatalogHolder(name).getCatalogConfig();
}
/**
* Returns the catalog configs based on the catalog name. In the case where there are multiple catalogs with the
* same catalog name, this method will return multiple catalog configs.
*
* @param name name
* @return set of catalog configs
*/
@Nonnull
public Set<MetacatCatalogConfig> getCatalogConfigs(final String name) {
return getCatalogHolders(name).stream().map(CatalogHolder::getCatalogConfig).collect(Collectors.toSet());
}
/**
* Returns all catalog configs. In the case where a catalog is a proxy connector, the list of catalogs represented
* by the connector will not be included.
* @return set of catalog configs
*/
@Nonnull
public Set<MetacatCatalogConfig> getCatalogConfigs() {
return catalogConfigs;
}
/**
* Returns all catalogs. The list will also include the list of catalogs represented by a proxy connector.
* @return set of catalogs
*/
@Nonnull
public Set<CatalogInfo> getCatalogs() {
return catalogs.column(EMPTY_STRING).values().stream().flatMap(c -> {
final Stream.Builder<CatalogInfo> builder = Stream.builder();
final MetacatCatalogConfig catalogConfig = c.getCatalogConfig();
if (catalogConfig.isProxy()) {
c.getConnectorFactory().getCatalogService()
.list(new ConnectorRequestContext(), QualifiedName.ofCatalog(catalogConfig.getCatalogName()),
null, null, null)
.forEach(builder);
} else {
builder.accept(catalogConfig.toCatalogInfo());
}
return builder.build();
}).collect(Collectors.toSet());
}
/**
* Returns the connector factory for the given <code>name</code>.
*
* @param name qualified name
* @return Returns the connector factory for the given <code>name</code>
*/
private ConnectorFactory getConnectorFactory(final QualifiedName name) {
Preconditions.checkNotNull(name, "Name is null");
return getCatalogHolder(name).getConnectorFactory();
}
/**
* Returns the connector plugin for the given <code>catalogName</code>.
*
* @param connectorType connector type
* @return Returns the plugin for the given <code>catalogName</code>
*/
public ConnectorPlugin getPlugin(final String connectorType) {
Preconditions.checkNotNull(connectorType, "connectorType is null");
final ConnectorPlugin result = plugins.get(connectorType);
Preconditions.checkNotNull(result, "No connector plugin exists for type %s", connectorType);
return result;
}
/**
* Returns all the connector database services.
*
* @return Returns all the connector database services registered in the system.
*/
public Set<ConnectorDatabaseService> getDatabaseServices() {
return databaseServices;
}
/**
* Returns all the connector table services.
*
* @return Returns all the connector table services registered in the system.
*/
public Set<ConnectorTableService> getTableServices() {
return tableServices;
}
/**
* Returns all the connector partition services.
*
* @return Returns all the connector partition services registered in the system.
*/
public Set<ConnectorPartitionService> getPartitionServices() {
return partitionServices;
}
/**
* Returns the connector catalog service for the given <code>name</code>.
*
* @param name qualified name
* @return Returns the connector catalog service for the given <code>name</code>
*/
public ConnectorCatalogService getCatalogService(final QualifiedName name) {
return getConnectorFactory(name).getCatalogService();
}
/**
* Returns the connector database service for the given <code>name</code>.
*
* @param name qualified name
* @return Returns the connector database service for the given <code>name</code>
*/
public ConnectorDatabaseService getDatabaseService(final QualifiedName name) {
return getConnectorFactory(name).getDatabaseService();
}
/**
* Returns the connector table service for the given <code>name</code>.
*
* @param name qualified name
* @return Returns the connector table service for the given <code>name</code>
*/
public ConnectorTableService getTableService(final QualifiedName name) {
return getConnectorFactory(name).getTableService();
}
/**
* Returns the connector partition service for the given <code>name</code>.
*
* @param name qualified name
* @return Returns the connector partition service for the given <code>name</code>
*/
public ConnectorPartitionService getPartitionService(final QualifiedName name) {
return getConnectorFactory(name).getPartitionService();
}
/**
* Returns the connector type converter for the given <code>connectorType</code>.
*
* @param connectorType connector type
* @return Returns the connector type converter for the given <code>connectorType</code>
*/
public ConnectorTypeConverter getTypeConverter(final String connectorType) {
return getPlugin(connectorType).getTypeConverter();
}
/**
* Returns the connector dto converter for the given <code>connectorType</code>.
*
* @param connectorType connector type
* @return Returns the connector dto converter for the given <code>connectorType</code>
*/
public ConnectorInfoConverter getInfoConverter(final String connectorType) {
return getPlugin(connectorType).getInfoConverter();
}
/**
* A Holder class holding the catalog's config and connector factory.
*/
@Data
@AllArgsConstructor
private static class CatalogHolder {
private final MetacatCatalogConfig catalogConfig;
private final ConnectorFactory connectorFactory;
}
}
| 2,089 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/manager/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package includes initialization classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.main.manager;
import javax.annotation.ParametersAreNonnullByDefault;
| 2,090 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/PropertiesConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.properties.DefaultConfigImpl;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Configuration for binding Metacat properties.
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
public class PropertiesConfig {
/**
* Static properties bindings.
*
* @return The metacat properties.
*/
@Bean
@ConfigurationProperties("metacat")
public MetacatProperties metacatProperties() {
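// Properties prefixed with "metacat" (e.g. metacat.cache.enabled=true) are bound onto this bean.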
return new MetacatProperties();
}
/**
* Get the configuration abstraction for use in metacat.
*
* @param metacatProperties The overall metacat properties to use
* @return The configuration object
*/
@Bean
public Config config(final MetacatProperties metacatProperties) {
return new DefaultConfigImpl(metacatProperties);
}
}
| 2,091 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/SpringFoxConfig.java
|
package com.netflix.metacat.main.configs;
import org.springframework.boot.actuate.autoconfigure.endpoint.web.CorsEndpointProperties;
import org.springframework.boot.actuate.autoconfigure.endpoint.web.WebEndpointProperties;
import org.springframework.boot.actuate.autoconfigure.web.server.ManagementPortType;
import org.springframework.boot.actuate.endpoint.ExposableEndpoint;
import org.springframework.boot.actuate.endpoint.web.EndpointLinksResolver;
import org.springframework.boot.actuate.endpoint.web.EndpointMapping;
import org.springframework.boot.actuate.endpoint.web.EndpointMediaTypes;
import org.springframework.boot.actuate.endpoint.web.ExposableWebEndpoint;
import org.springframework.boot.actuate.endpoint.web.WebEndpointsSupplier;
import org.springframework.boot.actuate.endpoint.web.annotation.ControllerEndpointsSupplier;
import org.springframework.boot.actuate.endpoint.web.annotation.ServletEndpointsSupplier;
import org.springframework.boot.actuate.endpoint.web.servlet.WebMvcEndpointHandlerMapping;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.util.StringUtils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* Needed to get SpringFox working with SBN 2.6+.
*/
@Configuration
public class SpringFoxConfig {
/**
* This bean initialization is needed for SpringFox to work with SBN 2.6+.
*
* @param webEndpointsSupplier web endpoint supplier
* @param servletEndpointsSupplier servlet endpoint supplier
* @param controllerEndpointsSupplier controller endpoint supplier
* @param endpointMediaTypes media types
* @param corsProperties CORS properties
* @param webEndpointProperties web endpoint properties
* @param environment application environment
* @return WebMvcEndpointHandlerMapping
*/
@Bean
public WebMvcEndpointHandlerMapping webEndpointServletHandlerMapping(
final WebEndpointsSupplier webEndpointsSupplier,
final ServletEndpointsSupplier servletEndpointsSupplier,
final ControllerEndpointsSupplier controllerEndpointsSupplier,
final EndpointMediaTypes endpointMediaTypes,
final CorsEndpointProperties corsProperties,
final WebEndpointProperties webEndpointProperties,
final Environment environment) {
final List<ExposableEndpoint<?>> allEndpoints = new ArrayList<>();
final Collection<ExposableWebEndpoint> webEndpoints = webEndpointsSupplier.getEndpoints();
allEndpoints.addAll(webEndpoints);
allEndpoints.addAll(servletEndpointsSupplier.getEndpoints());
allEndpoints.addAll(controllerEndpointsSupplier.getEndpoints());
final String basePath = webEndpointProperties.getBasePath();
final EndpointMapping endpointMapping = new EndpointMapping(basePath);
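// The discovery links page is registered only when endpoint discovery is enabled and either a base path is set
// or the management endpoints run on a different port (see shouldRegisterLinksMapping below).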
final boolean shouldRegisterLinksMapping = this.shouldRegisterLinksMapping(
webEndpointProperties, environment, basePath);
return new WebMvcEndpointHandlerMapping(endpointMapping, webEndpoints, endpointMediaTypes,
corsProperties.toCorsConfiguration(), new EndpointLinksResolver(allEndpoints, basePath),
shouldRegisterLinksMapping, null);
}
private boolean shouldRegisterLinksMapping(final WebEndpointProperties webEndpointProperties,
final Environment environment,
final String basePath) {
return webEndpointProperties.getDiscovery().isEnabled()
&& (StringUtils.hasText(basePath)
|| ManagementPortType.get(environment).equals(ManagementPortType.DIFFERENT));
}
}
| 2,092 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/ThriftConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.netflix.metacat.common.server.api.v1.MetacatV1;
import com.netflix.metacat.common.server.api.v1.PartitionV1;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.MetacatThriftService;
import com.netflix.metacat.thrift.CatalogThriftServiceFactory;
import com.netflix.metacat.thrift.CatalogThriftServiceFactoryImpl;
import com.netflix.metacat.thrift.DateConverters;
import com.netflix.metacat.thrift.HiveConverters;
import com.netflix.metacat.thrift.HiveConvertersImpl;
import com.netflix.spectator.api.Registry;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Spring Configuration for the Thrift Module.
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
public class ThriftConfig {
/**
* The hive converters implementation to use.
*
* @return The hive converters
*/
@Bean
public HiveConverters hiveConverters() {
return new HiveConvertersImpl();
}
/**
* The Catalog Thrift Service Factory.
*
* @param config Application config to use
* @param hiveConverters Hive converters to use
* @param metacatV1 The Metacat V1 API implementation to use
* @param partitionV1 The Metacat Partition V1 API to use
* @param registry registry for spectator
* @return The CatalogThriftServiceFactory
*/
@Bean
public CatalogThriftServiceFactory catalogThriftServiceFactory(
final Config config,
final HiveConverters hiveConverters,
final MetacatV1 metacatV1,
final PartitionV1 partitionV1,
final Registry registry
) {
return new CatalogThriftServiceFactoryImpl(
config,
hiveConverters,
metacatV1,
partitionV1,
registry
);
}
/**
* The date converter utility bean.
*
* @param config System configuration
* @return The date converters bean to use
*/
//TODO: Not sure if this is needed; it doesn't appear to be used anywhere.
@Bean
public DateConverters dateConverters(final Config config) {
return new DateConverters(config);
}
/**
* The MetacatThriftService.
*
* @param catalogThriftServiceFactory The factory to use
* @param connectorManager The connector manager to use
* @return The service bean
*/
@Bean
public MetacatThriftService metacatThriftService(
final CatalogThriftServiceFactory catalogThriftServiceFactory,
final ConnectorManager connectorManager
) {
return new MetacatThriftService(catalogThriftServiceFactory, connectorManager);
}
}
| 2,093 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/MetricsConfig.java
|
/*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Configuration of Metrics.
*
* @author rveeramacheneni
*/
@Configuration
public class MetricsConfig {
/**
* A default registry.
*
* @return The registry to use.
*/
@Bean
@ConditionalOnProperty(value = "metacat.metrics.default-registry.enabled", havingValue = "true")
public Registry spectatorRegistry() {
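// Spectator's basic in-memory registry; metrics are collected locally and not published to an external backend.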
return new DefaultRegistry();
}
}
| 2,094 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/ServicesConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.server.api.ratelimiter.DefaultRateLimiter;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiter;
import com.netflix.metacat.common.server.api.traffic_control.DefaultRequestGateway;
import com.netflix.metacat.common.server.api.traffic_control.RequestGateway;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.usermetadata.AliasService;
import com.netflix.metacat.common.server.usermetadata.AuthorizationService;
import com.netflix.metacat.common.server.usermetadata.DefaultAliasService;
import com.netflix.metacat.common.server.usermetadata.DefaultAuthorizationService;
import com.netflix.metacat.common.server.usermetadata.DefaultLookupService;
import com.netflix.metacat.common.server.usermetadata.DefaultTagService;
import com.netflix.metacat.common.server.usermetadata.DefaultUserMetadataService;
import com.netflix.metacat.common.server.usermetadata.LookupService;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.main.manager.CatalogManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.manager.PluginManager;
import com.netflix.metacat.main.services.CatalogService;
import com.netflix.metacat.main.services.CatalogTraversal;
import com.netflix.metacat.main.services.CatalogTraversalServiceHelper;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.MViewService;
import com.netflix.metacat.main.services.MetacatServiceHelper;
import com.netflix.metacat.main.services.MetacatThriftService;
import com.netflix.metacat.main.services.MetadataService;
import com.netflix.metacat.main.services.OwnerValidationService;
import com.netflix.metacat.main.services.PartitionService;
import com.netflix.metacat.main.services.TableService;
import com.netflix.metacat.main.services.health.MetacatHealthIndicator;
import com.netflix.metacat.main.services.impl.CatalogServiceImpl;
import com.netflix.metacat.main.services.impl.ConnectorTableServiceProxy;
import com.netflix.metacat.main.services.impl.DatabaseServiceImpl;
import com.netflix.metacat.main.services.impl.DefaultOwnerValidationService;
import com.netflix.metacat.main.services.impl.MViewServiceImpl;
import com.netflix.metacat.main.services.impl.PartitionServiceImpl;
import com.netflix.metacat.main.services.impl.TableServiceImpl;
import com.netflix.metacat.main.services.init.MetacatCoreInitService;
import com.netflix.metacat.main.services.init.MetacatThriftInitService;
import com.netflix.spectator.api.Registry;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Spring configuration of Service Tier.
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
public class ServicesConfig {
/**
* No-op User Metadata service.
*
* @return The default (no-op) user metadata service
*/
@Bean
@ConditionalOnMissingBean(UserMetadataService.class)
public UserMetadataService userMetadataService() {
return new DefaultUserMetadataService();
}
/**
* No-op Tag service.
*
* @return The default (no-op) tag service
*/
@Bean
@ConditionalOnMissingBean(TagService.class)
public TagService tagService() {
return new DefaultTagService();
}
/**
* Authorization service.
*
* @param config metacat config
* @return authorization class based on config
*/
@Bean
@ConditionalOnMissingBean(AuthorizationService.class)
public AuthorizationService authorizationService(
final Config config
) {
return new DefaultAuthorizationService(config);
}
/**
* Owner validation service.
*
* @param registry the spectator registry
* @return the owner validation service
*/
@Bean
@ConditionalOnMissingBean(OwnerValidationService.class)
public OwnerValidationService ownerValidationService(final Registry registry) {
return new DefaultOwnerValidationService(registry);
}
/**
* Alias service.
*
* @return an instance of the Alias service.
*/
@Bean
@ConditionalOnMissingBean(AliasService.class)
public AliasService aliasService() {
return new DefaultAliasService();
}
/**
* No-op lookup service.
*
* @return The default (no-op) lookup service
*/
@Bean
@ConditionalOnMissingBean(LookupService.class)
public LookupService lookupService() {
return new DefaultLookupService();
}
/**
* RateLimiter service.
*
* @return The rate-limiter service bean.
*/
@Bean
@ConditionalOnMissingBean(RateLimiter.class)
public RateLimiter rateLimiter() {
return new DefaultRateLimiter();
}
/**
* The default {@link RequestGateway} bean.
*
* @return the default {@link RequestGateway} bean.
*/
@Bean
@ConditionalOnMissingBean(RequestGateway.class)
public RequestGateway requestGateway() {
return new DefaultRequestGateway();
}
/**
* The catalog service bean.
*
* @param connectorManager Connector manager to use
* @param userMetadataService User metadata service
* @param metacatEventBus Event bus to use
* @param converterUtil Converter utilities
* @return Catalog service implementation
*/
@Bean
public CatalogService catalogService(
final ConnectorManager connectorManager,
final UserMetadataService userMetadataService,
final MetacatEventBus metacatEventBus,
final ConverterUtil converterUtil
) {
return new CatalogServiceImpl(connectorManager, userMetadataService, metacatEventBus, converterUtil);
}
/**
* The database service bean.
*
* @param connectorManager Connector manager to use
* @param userMetadataService User metadata service to use
* @param metacatEventBus Event bus to use
* @param converterUtil Converter utilities
* @param authorizationService authorization Service
* @return Database service implementation
*/
@Bean
public DatabaseService databaseService(
final ConnectorManager connectorManager,
final UserMetadataService userMetadataService,
final MetacatEventBus metacatEventBus,
final ConverterUtil converterUtil,
final AuthorizationService authorizationService
) {
return new DatabaseServiceImpl(
connectorManager,
userMetadataService,
metacatEventBus,
converterUtil,
authorizationService
);
}
/**
* The table service bean.
*
* @param connectorManager Connector manager to use
* @param connectorTableServiceProxy connector table service proxy
* @param databaseService database service
* @param tagService tag service
* @param userMetadataService user metadata service
* @param metacatJson metacat json utility
* @param eventBus Internal event bus
* @param registry registry handle
* @param config configurations
* @param converterUtil converter utilities
* @param authorizationService authorization Service
* @param ownerValidationService owner validation service
*
* @return The table service bean
*/
@Bean
public TableService tableService(
final ConnectorManager connectorManager,
final ConnectorTableServiceProxy connectorTableServiceProxy,
final DatabaseService databaseService,
final TagService tagService,
final UserMetadataService userMetadataService,
final MetacatJson metacatJson,
final MetacatEventBus eventBus,
final Registry registry,
final Config config,
final ConverterUtil converterUtil,
final AuthorizationService authorizationService,
final OwnerValidationService ownerValidationService) {
return new TableServiceImpl(
connectorManager,
connectorTableServiceProxy,
databaseService,
tagService,
userMetadataService,
metacatJson,
eventBus,
registry,
config,
converterUtil,
authorizationService,
ownerValidationService
);
}
/**
* The connector table service proxy bean.
*
* @param connectorManager Connector manager to use
* @param converterUtil Converter utilities
* @return The connector table service proxy bean
*/
@Bean
public ConnectorTableServiceProxy connectorTableServiceProxy(
final ConnectorManager connectorManager,
final ConverterUtil converterUtil
) {
return new ConnectorTableServiceProxy(
connectorManager,
converterUtil
);
}
/**
* Partition service bean.
*
* @param catalogService catalog service
* @param connectorManager connector manager
* @param tableService table service
* @param userMetadataService user metadata service
* @param threadServiceManager thread manager
* @param config configurations
* @param eventBus Internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
* @param registry registry handle
* @return The partition service implementation to use
*/
@Bean
public PartitionService partitionService(
final CatalogService catalogService,
final ConnectorManager connectorManager,
final TableService tableService,
final UserMetadataService userMetadataService,
final ThreadServiceManager threadServiceManager,
final Config config,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil,
final Registry registry
) {
return new PartitionServiceImpl(
catalogService,
connectorManager,
tableService,
userMetadataService,
threadServiceManager,
config,
eventBus,
converterUtil,
registry
);
}
/**
* The MViewService bean.
*
* @param connectorManager connector manager
* @param tableService table service
* @param partitionService partition service
* @param userMetadataService user metadata service
* @param eventBus Internal event bus
* @param converterUtil utility to convert to/from Dto to connector resources
* @return The MViewService implementation to use
*/
@Bean
public MViewService mViewService(
final ConnectorManager connectorManager,
final TableService tableService,
final PartitionService partitionService,
final UserMetadataService userMetadataService,
final MetacatEventBus eventBus,
final ConverterUtil converterUtil
) {
return new MViewServiceImpl(
connectorManager,
tableService,
partitionService,
userMetadataService,
eventBus,
converterUtil
);
}
/**
* The service helper.
*
* @param databaseService database service
* @param tableService table service
* @param partitionService partition service
* @param eventBus event bus
* @param mViewService view service
* @return The service helper instance to use
*/
@Bean
public MetacatServiceHelper metacatServiceHelper(
final DatabaseService databaseService,
final TableService tableService,
final PartitionService partitionService,
final MViewService mViewService,
final MetacatEventBus eventBus
) {
return new MetacatServiceHelper(databaseService, tableService, partitionService, mViewService, eventBus);
}
/**
* Metadata service bean.
*
* @param config System config
* @param tableService The table service to use
* @param partitionService The partition service to use
* @param userMetadataService The user metadata service to use
* @param tagService tag service
* @param helper Metacat service helper
* @param registry registry for spectator
* @return The metadata service bean
*/
@Bean
public MetadataService metadataService(
final Config config,
final TableService tableService,
final PartitionService partitionService,
final UserMetadataService userMetadataService,
final TagService tagService,
final MetacatServiceHelper helper,
final Registry registry
) {
return new MetadataService(config, tableService, partitionService, userMetadataService,
tagService, helper, registry);
}
/**
* The core initialization service that will handle startup and shutdown of the catalog.
* We do not configure the start and stop methods as bean lifecycle methods to Spring since they are
* called via the thrift init service.
*
* @param pluginManager Plugin manager to use
* @param catalogManager Catalog manager to use
* @param connectorManager Connector manager to use
* @param threadServiceManager Thread service manager to use
* @param applicationContext the application context
*
* @return The initialization service bean
*/
@Bean
public MetacatCoreInitService metacatCoreInitService(final PluginManager pluginManager,
final CatalogManager catalogManager,
final ConnectorManager connectorManager,
final ThreadServiceManager threadServiceManager,
final ApplicationContext applicationContext) {
return new MetacatCoreInitService(
pluginManager, catalogManager, connectorManager,
threadServiceManager, applicationContext);
}
/**
* The initialization service that will handle startup and shutdown of Metacat thrift service.
*
* @param metacatCoreInitService the core init service
* @param metacatThriftService Thrift service to use
*
* @return The initialization service bean
*/
@Bean(initMethod = "start", destroyMethod = "stop")
public MetacatThriftInitService metacatThriftInitService(final MetacatCoreInitService metacatCoreInitService,
final MetacatThriftService metacatThriftService) {
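// Spring invokes start/stop on this bean (see @Bean initMethod/destroyMethod); it in turn drives
// the core init service lifecycle, as described in the core init service javadoc above.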
return new MetacatThriftInitService(
metacatThriftService,
metacatCoreInitService
);
}
/**
* Metacat health indicator.
*
* @param metacatCoreInitService the core init service
* @param metacatThriftInitService the thrift init service
*
* @return the health indicator
*/
@Bean
public MetacatHealthIndicator metacatHealthIndicator(final MetacatCoreInitService metacatCoreInitService,
final MetacatThriftInitService metacatThriftInitService) {
return new MetacatHealthIndicator(
metacatCoreInitService, metacatThriftInitService
);
}
/**
* The catalog traversal service helper.
*
* @param catalogService Catalog service
* @param databaseService Database service
* @param tableService Table service
* @return The catalog traversal service helper bean
*/
@Bean
public CatalogTraversalServiceHelper catalogTraversalServiceHelper(
final CatalogService catalogService,
final DatabaseService databaseService,
final TableService tableService
) {
return new CatalogTraversalServiceHelper(
catalogService,
databaseService,
tableService
);
}
/**
* The catalog traversal bean.
*
* @param config System config
* @param catalogTraversalServiceHelper traversal service helper
* @param registry registry of spectator
* @return The catalog traversal bean
*/
@Bean
public CatalogTraversal catalogTraversal(
final Config config,
final CatalogTraversalServiceHelper catalogTraversalServiceHelper,
final Registry registry
) {
return new CatalogTraversal(
config,
catalogTraversalServiceHelper,
registry
);
}
}
| 2,095 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/CacheConfig.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.configs;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Configuration;
/**
* Spring configuration for cache.
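* Activated only when the property {@code metacat.cache.enabled} is set to {@code true}.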
*
* @author amajumdar
* @since 1.2.0
*/
@Configuration
@ConditionalOnProperty(value = "metacat.cache.enabled", havingValue = "true")
@EnableCaching
public class CacheConfig {
}
| 2,096 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/CommonServerConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.json.MetacatJson;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.converter.DozerJsonTypeConverter;
import com.netflix.metacat.common.server.converter.DozerTypeConverter;
import com.netflix.metacat.common.server.converter.TypeConverterFactory;
import com.netflix.metacat.common.server.events.MetacatApplicationEventMulticaster;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatEventListenerFactory;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.properties.MetacatProperties;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.spectator.api.Registry;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.AnnotationConfigUtils;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.event.EventListenerFactory;
/**
* Common configuration for Metacat based on classes found in the common server module.
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
public class CommonServerConfig {
private static final String DEFAULT_TYPE_CONVERTER = "defaultTypeConverter";
/**
* An object mapper bean to use if none already exists.
*
* @return JSON object mapper
*/
@Bean
@ConditionalOnMissingBean(ObjectMapper.class)
public ObjectMapper objectMapper() {
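// Lenient mapper: unknown JSON properties are ignored on read, and all fields (nulls included) are written.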
return new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.setSerializationInclusion(JsonInclude.Include.ALWAYS);
}
/**
* Metacat JSON Handler.
*
* @return The JSON handler
*/
@Bean
public MetacatJson metacatJson() {
return new MetacatJsonLocator();
}
/**
* The data source manager to use.
*
* @return The data source manager
*/
@Bean
public DataSourceManager dataSourceManager() {
return DataSourceManager.get();
}
/**
* The event bus abstraction to use.
*
* @param applicationEventMulticaster The asynchronous event publisher
* @param registry registry for spectator
* @return The event bus to use.
*/
@Bean
public MetacatEventBus eventBus(
final MetacatApplicationEventMulticaster applicationEventMulticaster,
final Registry registry
) {
return new MetacatEventBus(applicationEventMulticaster, registry);
}
/**
* The application event multicaster to use.
*
* @param registry registry for spectator
* @param metacatProperties The metacat properties to get the number of executor threads from.
* Likely best to use one more thread than the number of CPU cores.
* @return The application event multicaster to use.
*/
@Bean
public MetacatApplicationEventMulticaster applicationEventMulticaster(final Registry registry,
final MetacatProperties metacatProperties) {
return new MetacatApplicationEventMulticaster(registry, metacatProperties);
}
/**
* Default event listener factory.
* @return The application event multicaster to use.
*/
@Bean(AnnotationConfigUtils.EVENT_LISTENER_FACTORY_BEAN_NAME)
public EventListenerFactory eventListenerFactory() {
return new MetacatEventListenerFactory();
}
/**
* The type converter factory to use.
*
* @param defaultTypeConverter default type converter
* @return The type converter factory
*/
@Bean
public TypeConverterFactory typeConverterFactory(@Qualifier(DEFAULT_TYPE_CONVERTER)
final ConnectorTypeConverter defaultTypeConverter) {
return new TypeConverterFactory(defaultTypeConverter);
}
/**
* The default type converter.
*
* @param config The system configuration
* @return default type converter
*/
@Bean(DEFAULT_TYPE_CONVERTER)
public ConnectorTypeConverter defaultTypeConverter(final Config config) {
try {
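// Reflectively instantiate the converter class named in the config; it is assumed to have a public no-arg constructor.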
return (ConnectorTypeConverter) Class.forName(config.getDefaultTypeConverter()).newInstance();
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
/**
* The dozer type converter to use.
*
* @param typeConverterFactory The type converter factory to use
* @return type converter
*/
@Bean
public DozerTypeConverter dozerTypeConverter(final TypeConverterFactory typeConverterFactory) {
return new DozerTypeConverter(typeConverterFactory);
}
/**
* The dozer type converter to JSON format.
*
* @param typeConverterFactory The type converter factory to use
* @return type converter
*/
@Bean
public DozerJsonTypeConverter dozerJsonTypeConverter(final TypeConverterFactory typeConverterFactory) {
return new DozerJsonTypeConverter(typeConverterFactory);
}
/**
* Converter utility bean.
*
* @param dozerTypeConverter The Dozer type converter to use.
* @param dozerJsonTypeConverter The dozer type converter to JSON format.
* @return The converter util instance
*/
@Bean
public ConverterUtil converterUtil(final DozerTypeConverter dozerTypeConverter,
final DozerJsonTypeConverter dozerJsonTypeConverter) {
return new ConverterUtil(dozerTypeConverter, dozerJsonTypeConverter);
}
/**
* Get the ThreadServiceManager.
*
* @param registry registry for spectator
* @param config System configuration
* @return The thread service manager to use
*/
@Bean
public ThreadServiceManager threadServiceManager(final Registry registry, final Config config) {
return new ThreadServiceManager(registry, config);
}
}
| 2,097 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/ManagerConfig.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.netflix.metacat.common.server.converter.TypeConverterFactory;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.type.TypeManager;
import com.netflix.metacat.common.type.TypeRegistry;
import com.netflix.metacat.main.manager.CatalogManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.manager.DefaultCatalogManager;
import com.netflix.metacat.main.manager.PluginManager;
import com.netflix.spectator.api.Registry;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Spring configuration for Management beans.
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
public class ManagerConfig {
/**
* Manager of the connectors.
*
* @param config System config
* @return The connector manager instance to use.
*/
@Bean
public ConnectorManager connectorManager(final Config config) {
return new ConnectorManager(config);
}
/**
* Type manager to use.
*
* @return The type registry
*/
@Bean
public TypeManager typeManager() {
// TODO: Get rid of this static instantiation; Spring will manage the singleton.
return TypeRegistry.getTypeRegistry();
}
/**
* The plugin manager.
*
* @param connectorManager Connector manager to use
* @param typeConverterFactory Type converter factory to use
* @return The plugin manager instance
*/
@Bean
public PluginManager pluginManager(
final ConnectorManager connectorManager,
final TypeConverterFactory typeConverterFactory
) {
return new PluginManager(connectorManager, typeConverterFactory);
}
/**
* Catalog manager.
*
* @param connectorManager The connector manager to use
* @param config The system configuration to use
* @param registry registry for spectator
* @return Configured catalog manager
*/
@Bean
@ConditionalOnMissingBean(CatalogManager.class)
public CatalogManager catalogManager(
final ConnectorManager connectorManager,
final Config config,
final Registry registry
) {
return new DefaultCatalogManager(connectorManager, config, registry);
}
}
| 2,098 |
0 |
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main
|
Create_ds/metacat/metacat-main/src/main/java/com/netflix/metacat/main/configs/SwaggerConfig.java
|
/*
*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.main.configs;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.properties.Config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import springfox.bean.validators.configuration.BeanValidatorPluginsConfiguration;
import springfox.documentation.builders.PathSelectors;
import springfox.documentation.builders.RequestHandlerSelectors;
import springfox.documentation.service.ApiInfo;
import springfox.documentation.service.Contact;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spring.web.plugins.Docket;
import springfox.documentation.swagger2.annotations.EnableSwagger2;
/**
* Spring configuration for Swagger via SpringFox.
*
* See: https://github.com/springfox/springfox
*
* @author tgianos
* @since 1.1.0
*/
@Configuration
@ConditionalOnProperty(value = "springfox.documentation.swagger-ui.enabled", havingValue = "true")
@EnableSwagger2
@Import(BeanValidatorPluginsConfiguration.class)
public class SwaggerConfig {
/**
* Configure Spring Fox.
*
* @param config The configuration
* @return The spring fox docket.
*/
@Bean
public Docket api(final Config config) {
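// NOTE: the injected Config is not currently used when building the Docket.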
return new Docket(DocumentationType.SWAGGER_2)
.apiInfo(
/**
* public ApiInfo(
String title,
String description,
String version,
String termsOfServiceUrl,
Contact contact,
String license,
String licenseUrl,
Collection<VendorExtension> vendorExtensions)
*/
new ApiInfo(
"Metacat API",
"The set of APIs available in this version of metacat",
"1.1.0", // TODO: Swap out with dynamic from config
null,
new Contact("Netflix, Inc.", "https://jobs.netflix.com/", null),
"Apache 2.0",
"http://www.apache.org/licenses/LICENSE-2.0",
Lists.newArrayList()
)
)
.select()
.apis(RequestHandlerSelectors.basePackage("com.netflix.metacat.main.api"))
.paths(PathSelectors.any())
.build()
.pathMapping("/")
.useDefaultResponseMessages(false);
}
//TODO: Update with more detailed swagger configurations
// see: http://tinyurl.com/glla6vc
}
| 2,099 |