index
int64 0
0
| repo_id
stringlengths 26
205
| file_path
stringlengths 51
246
| content
stringlengths 8
433k
| __index_level_0__
int64 0
10k
|
---|---|---|---|---|
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Data transfer objects related to notification events.
*
* @author tgianos
* @since 0.1.47
*/
package com.netflix.metacat.common.dto.notifications;
| 9,700 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/SNSMessageFactory.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.metacat.common.dto.notifications.sns.messages.AddPartitionMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.CreateTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.DeletePartitionMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.DeleteTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.RenameTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateTableMessage;
import com.netflix.metacat.common.dto.notifications.sns.messages.UpdateTablePartitionsMessage;
import lombok.NonNull;
import java.io.IOException;
/**
* Create SNSMessage object based on the JSON passed in.
*
* @author tgianos
* @since 0.1.47
*/
public class SNSMessageFactory {
    private static final String TYPE_FIELD = "type";

    private final ObjectMapper mapper;

    /**
     * Constructor.
     *
     * @param mapper The object mapper to use for deserialization
     */
    public SNSMessageFactory(@NonNull final ObjectMapper mapper) {
        this.mapper = mapper;
    }

    /**
     * Convert a JSON String into a message if possible.
     *
     * @param json The body of the message to convert back to the original message object from JSON string
     * @return The message bound back into a POJO
     * @throws IOException When the input isn't valid JSON, or when it lacks a recognizable type field
     */
    public SNSMessage<?> getMessage(@NonNull final String json) throws IOException {
        final JsonNode object = this.mapper.readTree(json);
        if (!object.has(TYPE_FIELD)) {
            // Without the type discriminator there is no way to know which concrete class to bind to
            throw new IOException("Invalid content. No type field found");
        }
        final String typeText = object.get(TYPE_FIELD).asText();
        final SNSMessageType messageType;
        try {
            messageType = SNSMessageType.valueOf(typeText);
        } catch (final IllegalArgumentException iae) {
            // Honor the documented contract: malformed input surfaces as IOException,
            // not as an unchecked IllegalArgumentException from Enum.valueOf
            throw new IOException("Invalid content. Unrecognized type " + typeText, iae);
        }
        switch (messageType) {
            case TABLE_CREATE:
                return this.mapper.readValue(json, CreateTableMessage.class);
            case TABLE_DELETE:
                return this.mapper.readValue(json, DeleteTableMessage.class);
            case TABLE_UPDATE:
                return this.mapper.readValue(json, UpdateTableMessage.class);
            case TABLE_RENAME:
                return this.mapper.readValue(json, RenameTableMessage.class);
            case TABLE_PARTITIONS_UPDATE:
                return this.mapper.readValue(json, UpdateTablePartitionsMessage.class);
            case PARTITION_ADD:
                return this.mapper.readValue(json, AddPartitionMessage.class);
            case PARTITION_DELETE:
                return this.mapper.readValue(json, DeletePartitionMessage.class);
            default:
                // NOTE(review): PARTITION_METADATAONLY_SAVE has no message class bound here and
                // falls through to this throw - confirm that is intentional
                throw new UnsupportedOperationException("Unknown type " + messageType);
        }
    }
}
| 9,701 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/SNSMessageType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns;
/**
* Enumeration of the various types of SNS events there can be.
*
* @author tgianos
* @since 0.1.47
*/
public enum SNSMessageType {
    /** Fired when a table is created. */
    TABLE_CREATE,

    /** Fired when a table is deleted. */
    TABLE_DELETE,

    /** Fired when the metadata about a table is updated somehow. */
    TABLE_UPDATE,

    /** Fired when a table is renamed. */
    TABLE_RENAME,

    /** Fired when the partitions for a table are either created or deleted. */
    TABLE_PARTITIONS_UPDATE,

    /** Fired when a partition is added. */
    PARTITION_ADD,

    /** Fired when a partition is deleted. */
    PARTITION_DELETE,

    /** Fired when only the metadata of a partition is saved. */
    PARTITION_METADATAONLY_SAVE
}
| 9,702 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Data Transfer Objects (DTO) for AWS SNS notifications from Metacat.
*
* @author tgianos
* @since 0.1.47
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.dto.notifications.sns;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,703 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/SNSMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.BaseDto;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import javax.annotation.Nullable;
/**
* Base SNS notification DTO with shared fields.
*
* @param <P> The type of payload this notification has
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString
@EqualsAndHashCode(callSuper = false)
@SuppressFBWarnings
@Getter
@ToString
@EqualsAndHashCode(callSuper = false)
@SuppressFBWarnings
public class SNSMessage<P> extends BaseDto {
    // All messages originate from metacat, so the source is a fixed constant
    private final String source = "metacat";
    private final String id;
    private final long timestamp;
    private final String requestId;
    private final String name;
    private final SNSMessageType type;
    private final P payload;

    /**
     * Construct a new SNSMessage instance.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param type      The type of notification
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification, may be null
     */
    @JsonCreator
    public SNSMessage(
        @JsonProperty("id") @NonNull final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") @NonNull final String requestId,
        @JsonProperty("type") @NonNull final SNSMessageType type,
        @JsonProperty("name") @NonNull final String name,
        @JsonProperty("payload") @Nullable final P payload
    ) {
        this.type = type;
        this.name = name;
        this.id = id;
        this.requestId = requestId;
        this.timestamp = timestamp;
        this.payload = payload;
    }
}
| 9,704 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/DeletePartitionMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* A message sent when a partition is deleted.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class DeletePartitionMessage extends SNSMessage<String> {
    /**
     * Construct a DeletePartitionMessage, fixing the message type to {@code PARTITION_DELETE}.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    @JsonCreator
    public DeletePartitionMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final String payload
    ) {
        super(id, timestamp, requestId, SNSMessageType.PARTITION_DELETE, name, payload);
    }
}
| 9,705 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/UpdateTableMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import com.netflix.metacat.common.dto.notifications.sns.payloads.UpdatePayload;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* A message sent when a table is updated.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class UpdateTableMessage extends UpdateOrRenameTableMessageBase {
    /**
     * Construct an UpdateTableMessage, fixing the message type to {@code TABLE_UPDATE}.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    @JsonCreator
    public UpdateTableMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final UpdatePayload<TableDto> payload
    ) {
        super(id, timestamp, requestId, name, payload, SNSMessageType.TABLE_UPDATE);
    }
}
| 9,706 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/UpdateTablePartitionsMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import com.netflix.metacat.common.dto.notifications.sns.payloads.TablePartitionsUpdatePayload;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* Message sent when the partitions for a table are updated.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class UpdateTablePartitionsMessage extends SNSMessage<TablePartitionsUpdatePayload> {
    /**
     * Create a new UpdateTablePartitionsMessage.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    // @JsonCreator was missing here though every sibling message class has it; SNSMessageFactory
    // deserializes this class via ObjectMapper.readValue, which requires a creator annotation
    // for Jackson to use this constructor.
    @JsonCreator
    public UpdateTablePartitionsMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final TablePartitionsUpdatePayload payload
    ) {
        super(id, timestamp, requestId, SNSMessageType.TABLE_PARTITIONS_UPDATE, name, payload);
    }
}
| 9,707 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/UpdateOrRenameTableMessageBase.java
|
/*
*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import com.netflix.metacat.common.dto.notifications.sns.payloads.UpdatePayload;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* Base message type for Update and Rename messages.
*
* @author rveeramacheneni
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public abstract class UpdateOrRenameTableMessageBase extends SNSMessage<UpdatePayload<TableDto>> {
    /**
     * Construct the shared state for update/rename table messages; concrete subclasses
     * supply the message type.
     *
     * @param id          The unique id of the message
     * @param timestamp   The number of milliseconds since epoch that this message occurred
     * @param requestId   The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name        The qualified name of the resource that this notification is being generated for
     * @param payload     The payload of the notification
     * @param messageType Whether this is an Update or Rename message
     */
    @JsonCreator
    public UpdateOrRenameTableMessageBase(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final UpdatePayload<TableDto> payload,
        final SNSMessageType messageType
    ) {
        super(id, timestamp, requestId, messageType, name, payload);
    }
}
| 9,708 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/CreateTableMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* A message sent when a table is created.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class CreateTableMessage extends SNSMessage<TableDto> {
    /**
     * Construct a CreateTableMessage, fixing the message type to {@code TABLE_CREATE}.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    @JsonCreator
    public CreateTableMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final TableDto payload
    ) {
        super(id, timestamp, requestId, SNSMessageType.TABLE_CREATE, name, payload);
    }
}
| 9,709 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/AddPartitionMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* A message sent when a partition is created.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class AddPartitionMessage extends SNSMessage<PartitionDto> {
    /**
     * Construct an AddPartitionMessage, fixing the message type to {@code PARTITION_ADD}.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    @JsonCreator
    public AddPartitionMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final PartitionDto payload
    ) {
        super(id, timestamp, requestId, SNSMessageType.PARTITION_ADD, name, payload);
    }
}
| 9,710 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/RenameTableMessage.java
|
/*
*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import com.netflix.metacat.common.dto.notifications.sns.payloads.UpdatePayload;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* A message sent when a table is renamed.
*
* @author rveeramacheneni
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class RenameTableMessage extends UpdateOrRenameTableMessageBase {
    /**
     * Construct a RenameTableMessage, fixing the message type to {@code TABLE_RENAME}.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    @JsonCreator
    public RenameTableMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final UpdatePayload<TableDto> payload
    ) {
        super(id, timestamp, requestId, name, payload, SNSMessageType.TABLE_RENAME);
    }
}
| 9,711 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/DeleteTableMessage.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.messages;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessage;
import com.netflix.metacat.common.dto.notifications.sns.SNSMessageType;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* A message sent when a table is deleted.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
@Getter
@ToString(callSuper = true)
@EqualsAndHashCode(callSuper = true)
public class DeleteTableMessage extends SNSMessage<TableDto> {
    /**
     * Construct a DeleteTableMessage, fixing the message type to {@code TABLE_DELETE}.
     *
     * @param id        The unique id of the message
     * @param timestamp The number of milliseconds since epoch that this message occurred
     * @param requestId The id of the API request that generated this and possibly other messages. Used for grouping
     * @param name      The qualified name of the resource that this notification is being generated for
     * @param payload   The payload of the notification
     */
    @JsonCreator
    public DeleteTableMessage(
        @JsonProperty("id") final String id,
        @JsonProperty("timestamp") final long timestamp,
        @JsonProperty("requestId") final String requestId,
        @JsonProperty("name") final String name,
        @JsonProperty("payload") final TableDto payload
    ) {
        super(id, timestamp, requestId, SNSMessageType.TABLE_DELETE, name, payload);
    }
}
| 9,712 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/messages/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Specific notification messages which extend SNSMessage.
*
* @author tgianos
* @since 0.1.47
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.dto.notifications.sns.messages;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,713 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/payloads/UpdatePayload.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.payloads;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.github.fge.jsonpatch.JsonPatch;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
* Represents the contents of an update payload.
*
* @param <T> The DTO type that was update. e.g. com.netflix.metacat.common.dto.TableDto
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString
@EqualsAndHashCode
@Getter
@ToString
@EqualsAndHashCode
public class UpdatePayload<T> {
    // Made final for immutability: no setters exist and every other DTO in this
    // package (SNSMessage, TablePartitionsUpdatePayload) declares its fields final.
    private final T previous;
    private final JsonPatch patch;

    /**
     * Create a new update payload.
     *
     * @param previous The previous version of the object that was updated
     * @param patch    The JSON patch to go from previous to current
     */
    @JsonCreator
    public UpdatePayload(
        @JsonProperty("previous") final T previous,
        @JsonProperty("patch") final JsonPatch patch
    ) {
        this.previous = previous;
        this.patch = patch;
    }
}
| 9,714 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/payloads/TablePartitionsUpdatePayload.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.dto.notifications.sns.payloads;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import javax.annotation.Nullable;
import java.util.List;
/**
* Information about how the partitions have changed when a table was updated.
*
* @author tgianos
* @since 0.1.47
*/
@Getter
@ToString
@EqualsAndHashCode
public class TablePartitionsUpdatePayload {
    // Field declaration order is preserved: lombok's @ToString/@EqualsAndHashCode
    // iterate fields in declaration order, so reordering would change output.
    private final String latestDeleteColumnValue;
    private final int numCreatedPartitions;
    private final int numDeletedPartitions;
    private final String message;
    private final List<String> partitionsUpdated;

    /**
     * Builds a payload describing how a table's partitions changed.
     *
     * @param latestDeleteColumnValue The latest DeleteColumn value processed by microbot
     * @param numCreatedPartitions    Count of partitions created for the table
     * @param numDeletedPartitions    Count of partitions deleted from the table
     * @param message                 Free-form message about the partition ids
     * @param partitionsUpdated       Ids of the partitions that were updated
     */
    @JsonCreator
    public TablePartitionsUpdatePayload(
        @Nullable @JsonProperty("latestDeleteColumnValue") final String latestDeleteColumnValue,
        @JsonProperty("numCreatedPartitions") final int numCreatedPartitions,
        @JsonProperty("numDeletedPartitions") final int numDeletedPartitions,
        @JsonProperty("message") final String message,
        @JsonProperty("partitionsUpdated") final List<String> partitionsUpdated) {
        // Plain field assignments; order here is irrelevant to behavior.
        this.partitionsUpdated = partitionsUpdated;
        this.message = message;
        this.numDeletedPartitions = numDeletedPartitions;
        this.numCreatedPartitions = numCreatedPartitions;
        this.latestDeleteColumnValue = latestDeleteColumnValue;
    }
}
| 9,715 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/dto/notifications/sns/payloads/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Various payload representations for SNS Notifications.
*
* @author tgianos
* @since 0.1.47
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.dto.notifications.sns.payloads;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,716 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonLocator.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.json;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.node.ObjectNode;
import lombok.AllArgsConstructor;
import lombok.Getter;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Iterator;
import java.util.Map;
/**
* MetacatJson implementation.
*/
@AllArgsConstructor
@Getter
public class MetacatJsonLocator implements MetacatJson {
    // Base mapper: tolerant of unknown fields on read, serializes every field.
    private final ObjectMapper objectMapper;
    // Copy of the base mapper configured to pretty-print output.
    private final ObjectMapper prettyObjectMapper;

    /**
     * Constructor. Builds the default and pretty-printing mappers.
     */
    public MetacatJsonLocator() {
        objectMapper = new ObjectMapper()
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
            .setSerializationInclusion(JsonInclude.Include.ALWAYS);
        prettyObjectMapper = objectMapper.copy().configure(SerializationFeature.INDENT_OUTPUT, true);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public <T> T convertValue(final Object fromValue, final Class<T> toValueType) throws IllegalArgumentException {
        return objectMapper.convertValue(fromValue, toValueType);
    }

    /**
     * {@inheritDoc}
     *
     * <p>Mirrors {@link #serializeObjectNode}: reads the presence flag first, then
     * the UTF string only when the flag was true.</p>
     */
    @Override
    @Nullable
    public ObjectNode deserializeObjectNode(
        @Nonnull final ObjectInputStream inputStream) throws IOException {
        final boolean exists = inputStream.readBoolean();
        ObjectNode json = null;
        if (exists) {
            final String s = inputStream.readUTF();
            json = (ObjectNode) objectMapper.readTree(s);
        }
        return json;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ObjectNode emptyObjectNode() {
        return objectMapper.createObjectNode();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void mergeIntoPrimary(
        @Nonnull final ObjectNode primary,
        @Nonnull final ObjectNode additional) {
        try {
            recursiveMerge(primary, additional);
        } catch (MetacatJsonException e) {
            // Preserve the original exception as the cause instead of dropping it,
            // so callers can see where inside the merge the failure originated.
            throw new IllegalArgumentException("Unable to merge '" + additional + "' into '" + primary + "'", e);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>Returns {@code null} when the string parses to valid JSON that is not an
     * object (e.g. an array or scalar); throws only when the string is not JSON.</p>
     */
    @Nullable
    @Override
    public ObjectNode parseJsonObject(final String s) {
        final JsonNode node;
        try {
            node = objectMapper.readTree(s);
        } catch (Exception e) {
            throw new MetacatJsonException(s, "Cannot convert '" + s + "' to a json object", e);
        }
        return node.isObject() ? (ObjectNode) node : null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public <T> T parseJsonValue(final String s, final Class<T> clazz) {
        try {
            return objectMapper.readValue(s, clazz);
        } catch (IOException e) {
            throw new MetacatJsonException("Unable to convert '" + s + "' into " + clazz, e);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public <T> T parseJsonValue(final byte[] s, final Class<T> clazz) {
        try {
            return objectMapper.readValue(s, clazz);
        } catch (IOException e) {
            throw new MetacatJsonException("Unable to convert bytes into " + clazz, e);
        }
    }

    /**
     * Recursively merges fields of {@code additional} into {@code primary}.
     * Scalar/array values and non-object targets are overwritten; nested objects
     * are merged field by field.
     */
    private void recursiveMerge(final JsonNode primary, final JsonNode additional) {
        if (!primary.isObject()) {
            // Guard: every recursive call below only happens when the target field
            // is an object, so this branch should never execute.
            throw new MetacatJsonException("This should not be reachable");
        }
        final ObjectNode node = (ObjectNode) primary;
        final Iterator<Map.Entry<String, JsonNode>> fields = additional.fields();
        while (fields.hasNext()) {
            final Map.Entry<String, JsonNode> entry = fields.next();
            final String name = entry.getKey();
            final JsonNode value = entry.getValue();
            // Easiest case, if the primary node doesn't have the current field set the field on the primary
            if (!node.has(name)) {
                node.set(name, value);
            } else if (!value.isObject()) {
                // If the primary has the field but the incoming value is not an object set the field on the primary
                node.set(name, value);
            } else if (!node.get(name).isObject()) {
                // If the primary is currently not an object, just overwrite it with the incoming value
                node.set(name, value);
            } else { // Otherwise recursively merge the new fields from the incoming object into the primary object
                recursiveMerge(node.get(name), value);
            }
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>Writes a boolean presence flag followed by the JSON text (UTF) when present,
     * matching the format expected by {@link #deserializeObjectNode}.</p>
     */
    @Override
    public void serializeObjectNode(
        @Nonnull final ObjectOutputStream outputStream,
        @Nullable final ObjectNode json)
        throws IOException {
        final boolean exists = json != null;
        outputStream.writeBoolean(exists);
        if (exists) {
            outputStream.writeUTF(json.toString());
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public byte[] toJsonAsBytes(final Object o) {
        try {
            return objectMapper.writeValueAsBytes(o);
        } catch (JsonProcessingException e) {
            throw new MetacatJsonException(e);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ObjectNode toJsonObject(final Object o) {
        return objectMapper.valueToTree(o);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String toJsonString(final Object o) {
        try {
            return objectMapper.writeValueAsString(o);
        } catch (JsonProcessingException e) {
            throw new MetacatJsonException(e);
        }
    }
}
| 9,717 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.json;
import lombok.Getter;
/**
* Metacat JSON utility related exception.
*/
@Getter
public class MetacatJsonException extends RuntimeException {
    // The JSON input that triggered this exception; null when the failure was not
    // tied to a specific input string. Final: assigned exactly once per constructor.
    private final String inputJson;

    /**
     * Constructor.
     *
     * @param message exception message
     */
    public MetacatJsonException(final String message) {
        super(message);
        this.inputJson = null;
    }

    /**
     * Constructor.
     *
     * @param inputJson input json string
     * @param message   details of the message
     * @param cause     exception cause
     */
    public MetacatJsonException(final String inputJson, final String message, final Throwable cause) {
        super(message, cause);
        this.inputJson = inputJson;
    }

    /**
     * Constructor.
     *
     * @param cause exception cause
     */
    public MetacatJsonException(final Throwable cause) {
        super(cause);
        this.inputJson = null;
    }

    /**
     * Constructor.
     *
     * @param message exception message
     * @param cause   exception cause
     */
    public MetacatJsonException(final String message, final Throwable cause) {
        super(message, cause);
        this.inputJson = null;
    }

    /**
     * Default constructor.
     */
    public MetacatJsonException() {
        super();
        this.inputJson = null;
    }
}
| 9,718 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJson.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.json;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
* JSON utility.
*/
public interface MetacatJson {
    /**
     * Convenience method for doing two-step conversion from given value, into
     * instance of given value type. This is functionally equivalent to first
     * serializing given value into JSON, then binding JSON data into value
     * of given type, but may be executed without fully serializing into
     * JSON. Same converters (serializers, deserializers) will be used as for
     * data binding, meaning same object mapper configuration works.
     *
     * @param fromValue object to be converted
     * @param toValueType POJO class to be converted to
     * @param <T> POJO class
     * @return Returns the converted POJO
     * @throws MetacatJsonException If conversion fails due to incompatible type;
     * if so, root cause will contain underlying checked exception data binding
     * functionality threw
     */
    <T> T convertValue(Object fromValue, Class<T> toValueType);
    /**
     * A helper for implementing Serializable. Reads a boolean from the inputStream to determine whether the next
     * object is a json object and, if it is, reads it and returns an object node.
     *
     * @param inputStream the serialization input stream
     * @return a json object if one is the next object otherwise null
     * @throws IOException on an error reading from the stream or a json serialization error.
     */
    @Nullable
    ObjectNode deserializeObjectNode(
        @Nonnull
        ObjectInputStream inputStream) throws IOException;
    /**
     * Returns an empty object node.
     *
     * @return an empty object node
     */
    ObjectNode emptyObjectNode();
    /**
     * Returns the default ObjectMapper used by this instance.
     *
     * @return The default ObjectMapper used by this instance.
     */
    ObjectMapper getObjectMapper();
    /**
     * Returns the default ObjectMapper used by this instance configured to pretty print.
     *
     * @return The default ObjectMapper used by this instance configured to pretty print.
     */
    ObjectMapper getPrettyObjectMapper();
    /**
     * Merges the fields of the additional json node into the primary json node, modifying the primary in place.
     *
     * @param primary first json node
     * @param additional second json node
     */
    void mergeIntoPrimary(
        @Nonnull
        ObjectNode primary,
        @Nonnull
        ObjectNode additional);
    /**
     * Parses the given string as json and returns an ObjectNode representing the json. Assumes the json is a
     * json object.
     *
     * @param s a string representing a json object
     * @return an object node representation of the string
     * @throws MetacatJsonException if unable to convert the string to json or the json isn't a json object.
     */
    ObjectNode parseJsonObject(String s);
    /**
     * Parses the given JSON value.
     *
     * @param s json string
     * @param clazz class
     * @param <T> type of the class
     * @return object
     */
    <T> T parseJsonValue(String s, Class<T> clazz);
    /**
     * Parses the given JSON value.
     *
     * @param s json byte array
     * @param clazz class
     * @param <T> type of the class
     * @return object
     */
    <T> T parseJsonValue(byte[] s, Class<T> clazz);
    /**
     * Serializes the JSON node to the output stream (a presence flag followed by the json text, if present).
     *
     * @param outputStream output stream
     * @param json json node
     * @throws IOException exception
     */
    void serializeObjectNode(
        @Nonnull
        ObjectOutputStream outputStream,
        @Nullable
        ObjectNode json) throws IOException;
    /**
     * Converts an object to JSON as bytes.
     *
     * @param o object
     * @return byte array
     */
    byte[] toJsonAsBytes(Object o);
    /**
     * Converts an object to a JSON node.
     *
     * @param o object
     * @return JSON node
     */
    ObjectNode toJsonObject(Object o);
    /**
     * Converts an object to a JSON string.
     *
     * @param o object
     * @return JSON string
     */
    String toJsonString(Object o);
}
| 9,719 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/json/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Contains the utilities for JSON serialization/deserialization.
*
* @author amajumdar
*/
package com.netflix.metacat.common.json;
| 9,720 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/AbstractType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
* Abstract type class.
*
* @author zhenl
*/
@Getter
@EqualsAndHashCode
public abstract class AbstractType implements Type {
    // Immutable signature shared by all concrete types; exposed through the
    // lombok-generated getTypeSignature() required by the Type interface.
    private final TypeSignature typeSignature;

    AbstractType(final TypeSignature typeSignature) {
        this.typeSignature = typeSignature;
    }

    /**
     * Returns the human-readable name of this type, which is simply the string
     * form of its signature.
     *
     * @return name
     */
    public String getDisplayName() {
        return this.typeSignature.toString();
    }
}
| 9,721 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/Type.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
/**
* Canonical type interface.
*
* @author zhenl
*/
public interface Type {
    /**
     * Returns the signature of this type that should be displayed to end-users.
     *
     * @return signature
     */
    TypeSignature getTypeSignature();
    /**
     * Returns the human-readable display name of this type.
     *
     * @return name
     */
    String getDisplayName();
}
| 9,722 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/TypeUtils.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.Collection;
/**
* Type util class.
*
* @author zhenl
*/
public final class TypeUtils {

    private TypeUtils() {
        // Static utility holder; never instantiated.
    }

    /**
     * Builds a {@link TypeSignature} for a parameterized base type with the given
     * type-parameter signatures and no literal parameters.
     *
     * @param baseType      baseType
     * @param argumentNames args
     * @return type signature
     */
    public static TypeSignature parameterizedTypeSignature(
        final TypeEnum baseType,
        final TypeSignature... argumentNames
    ) {
        final ImmutableList<TypeSignature> typeParameters = ImmutableList.copyOf(argumentNames);
        return new TypeSignature(baseType, typeParameters, ImmutableList.of());
    }

    /**
     * Whether the given collection is null or contains no elements.
     *
     * @param collection collection
     * @return boolean
     */
    public static boolean isNullOrEmpty(final Collection<?> collection) {
        if (collection == null) {
            return true;
        }
        return collection.isEmpty();
    }

    /**
     * Verifies that {@code value} is non-null and an instance of {@code target},
     * then returns it cast to the target type.
     *
     * @param value  value
     * @param target type
     * @param name   name used in failure messages
     * @param <A>    A
     * @param <B>    B
     * @return B
     */
    public static <A, B extends A> B checkType(final A value, final Class<B> target, final String name) {
        Preconditions.checkNotNull(value, "%s is null", name);
        Preconditions.checkArgument(target.isInstance(value),
            "%s must be of type %s, not %s",
            name,
            target.getName(),
            value.getClass().getName());
        return target.cast(value);
    }
}
| 9,723 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/TypeManager.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import java.util.List;
/**
* Type manager interface.
*
* @author zhenl
*/
public interface TypeManager {
    /**
     * Gets the type with the signature, or null if not found.
     *
     * @param signature type signature
     * @return Type
     */
    Type getType(TypeSignature signature);
    /**
     * Gets the type with the specified parameters, or null if not found.
     *
     * @param baseType baseType
     * @param typeParameters typeParameters
     * @param literalParameters literalParameters
     * @return Type
     */
    Type getParameterizedType(TypeEnum baseType, List<TypeSignature> typeParameters, List<Object> literalParameters);
    /**
     * Gets a list of all registered types.
     *
     * @return list of types.
     */
    List<Type> getTypes();
}
| 9,724 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/TypeEnum.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.common.type;
import lombok.Getter;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.util.Locale;
/**
* Canonical base type class.
*
* @author zhenl
*/
@Getter
public enum TypeEnum {
    /**
     * Numeric Types.
     * smallint 2-byte signed integer from -32,768 to 32,767.
     */
    SMALLINT("smallint", false),
    /**
     * tinyint 1-byte signed integer, from -128 to 127.
     */
    TINYINT("tinyint", false),
    /**
     * int 4-byte signed integer, from -2,147,483,648 to 2,147,483,647.
     */
    INT("int", false),
    /**
     * bigint 8-byte signed integer, from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807.
     */
    BIGINT("bigint", false),
    /**
     * float 4-byte single precision floating point number.
     */
    FLOAT("float", false),
    /**
     * double 8-byte double precision floating point number.
     */
    DOUBLE("double", false),
    /**
     * decimal type user definable precision and scale.
     */
    DECIMAL("decimal", true),
    /**
     * char fixed length less than or equals to 255.
     */
    CHAR("char", true),
    /**
     * varchar created with a length specifier (between 1 and 65535).
     */
    VARCHAR("varchar", true),
    /**
     * string type.
     */
    STRING("string", false),
    /**
     * json json string.
     */
    JSON("json", false),
    /**
     * boolean type.
     */
    BOOLEAN("boolean", false),
    /**
     * varbinary type.
     */
    VARBINARY("varbinary", true),
    /**
     * date year/month/day in the form YYYY-MM-DD.
     */
    DATE("date", false),
    /**
     * time traditional UNIX timestamp with optional nanosecond precision.
     */
    TIME("time", false),
    /**
     * time with time zone.
     */
    TIME_WITH_TIME_ZONE("time with time zone", false),
    /**
     * timestamp type.
     */
    TIMESTAMP("timestamp", false),
    /**
     * timestamp with time zone type.
     */
    TIMESTAMP_WITH_TIME_ZONE("timestamp with time zone", false),
    /**
     * Year to month intervals, format: SY-M
     * S: optional sign (+/-)
     * Y: year count
     * M: month count
     * example INTERVAL '1-2' YEAR TO MONTH.
     **/
    INTERVAL_YEAR_TO_MONTH("interval year to month", false),
    /**
     * Day to second intervals, format: SD H:M:S.nnnnnn
     * S: optional sign (+/-)
     * D: day count
     * H: hours
     * M: minutes
     * S: seconds
     * nnnnnn: optional nanotime
     * example INTERVAL '1 2:3:4.000005' DAY.
     */
    INTERVAL_DAY_TO_SECOND("interval day to second", false),
    /**
     * unknown type.
     */
    UNKNOWN("unknown", false),
    /**
     * array type.
     */
    ARRAY("array", true),
    /**
     * row type.
     */
    ROW("row", true),
    /**
     * map type.
     */
    MAP("map", true);

    // Lowercase canonical name of the type, e.g. "timestamp with time zone".
    private final String type;
    // Whether the type accepts parameters, e.g. decimal(10, 2).
    private final boolean isParametricType;

    TypeEnum(@Nonnull @NonNull final String type, final boolean isParametricType) {
        this.type = type;
        this.isParametricType = isParametricType;
    }

    /**
     * Return the enum constant for the given base type name, or {@link #UNKNOWN}
     * if the name does not match any constant.
     *
     * @param name name
     * @return TypeEnum type
     */
    public static TypeEnum fromName(final String name) {
        try {
            // Locale.ROOT keeps the case mapping locale-independent: under e.g. the
            // Turkish default locale, "int".toUpperCase() yields a dotted 'İ' and
            // valueOf would fail, incorrectly mapping valid names to UNKNOWN.
            final String typeName = name.trim().toUpperCase(Locale.ROOT).replace(' ', '_');
            return TypeEnum.valueOf(typeName);
        } catch (final Exception e) {
            // Intentionally broad: any unknown or null name maps to UNKNOWN.
            return UNKNOWN;
        }
    }
}
| 9,725 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/ParametricType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import java.util.List;
/**
* Parametric type.
*
* @author zhenl
*/
public interface ParametricType extends Type {
/**
* Get type name.
*
* @return string
*/
TypeEnum getBaseType();
/**
* Create type.
*
* @param types types
* @param literals literals
* @return type
*/
Type createType(List<Type> types, List<Object> literals);
/**
* Returns the list of parameters.
*
* @return List of paramenters
*/
List<Type> getParameters();
}
| 9,726 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/DecimalType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.util.ArrayList;
import java.util.List;
/**
* Decimal type class.
*
* @author zhenl
*/
@Getter
@EqualsAndHashCode(callSuper = true)
public final class DecimalType extends AbstractType implements ParametricType {
    /**
     * Default decimal type.
     */
    public static final DecimalType DECIMAL = createDecimalType();
    /**
     * If scale is not specified, it defaults to 0 (no fractional digits).
     */
    private static final int DEFAULT_SCALE = 0;
    /**
     * If no precision is specified, it defaults to 10.
     */
    private static final int DEFAULT_PRECISION = 10;
    private final int precision;
    private final int scale;

    private DecimalType(final int precision, final int scale) {
        // super(...) must be the first statement in Java, so the signature is built
        // before the precision/scale arguments are validated below.
        super(
            new TypeSignature(
                TypeEnum.DECIMAL,
                new ArrayList<TypeSignature>(),
                Lists.<Object>newArrayList(
                    (long) precision,
                    (long) scale
                )
            )
        );
        Preconditions.checkArgument(precision >= 0, "Invalid decimal precision " + precision);
        Preconditions.checkArgument(scale >= 0 && scale <= precision, "Invalid decimal scale " + scale);
        this.precision = precision;
        this.scale = scale;
    }

    /**
     * Creates a decimal type with the given precision and scale.
     *
     * @param precision precision
     * @param scale     scale
     * @return DecimalType
     */
    public static DecimalType createDecimalType(final int precision, final int scale) {
        return new DecimalType(precision, scale);
    }

    /**
     * Creates a decimal type with the given precision and the default scale (0).
     *
     * @param precision precision
     * @return DecimalType
     */
    public static DecimalType createDecimalType(final int precision) {
        return createDecimalType(precision, DEFAULT_SCALE);
    }

    /**
     * Creates a decimal type with the default precision (10) and scale (0).
     *
     * @return DecimalType
     */
    public static DecimalType createDecimalType() {
        return createDecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        return ImmutableList.of();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.DECIMAL;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Accepts 0, 1 or 2 literal parameters: none (defaults), precision only,
     * or precision and scale. Literals are parsed as integers.</p>
     */
    @Override
    public Type createType(final List<Type> types, final List<Object> literals) {
        switch (literals.size()) {
            case 0:
                return DecimalType.createDecimalType();
            case 1:
                try {
                    return DecimalType.createDecimalType(Integer.parseInt(String.valueOf(literals.get(0))));
                } catch (NumberFormatException e) {
                    // Keep the parse failure as the cause so the bad literal is visible.
                    throw new IllegalArgumentException("Decimal precision must be a number", e);
                }
            case 2:
                try {
                    return DecimalType.createDecimalType(Integer.parseInt(String.valueOf(literals.get(0))),
                        Integer.parseInt(String.valueOf(literals.get(1))));
                } catch (NumberFormatException e) {
                    // Keep the parse failure as the cause so the bad literal is visible.
                    throw new IllegalArgumentException("Decimal parameters must be a number", e);
                }
            default:
                throw new IllegalArgumentException("Expected 0, 1 or 2 parameters for DECIMAL type constructor.");
        }
    }
}
| 9,727 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/RowType.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;
/**
* Row type.
*
* @author tgianos
* @author zhenl
* @since 1.0.0
*/
@Getter
public class RowType extends AbstractType implements ParametricType {
    /**
     * default type.
     */
    // Package-private shared instance of the empty row type.
    static final RowType ROW = new RowType(Collections.<RowField>emptyList());
    private final List<RowField> fields;
    /**
     * Constructor.
     *
     * @param fields The fields of this row
     */
    public RowType(@Nonnull @NonNull final List<RowField> fields) {
        // Build the ROW signature from the fields:
        //  - type parameters: each field's Type mapped to its TypeSignature
        //    (two chained Lists.transform calls: RowField -> Type -> TypeSignature)
        //  - literal parameters: each field's name.
        // NOTE(review): Lists.transform produces lazy views over 'fields'; they are
        // presumably fully consumed inside the TypeSignature constructor — confirm
        // before restructuring this initialization.
        super(
            new TypeSignature(
                TypeEnum.ROW,
                Lists.transform(
                    Lists.transform(
                        fields,
                        new Function<RowField, Type>() {
                            public Type apply(@Nullable final RowField input) {
                                return input == null ? null : input.getType();
                            }
                        }
                    ),
                    new Function<Type, TypeSignature>() {
                        public TypeSignature apply(@Nullable final Type input) {
                            return input == null ? null : input.getTypeSignature();
                        }
                    }),
                Lists.transform(fields, new Function<RowField, Object>() {
                    public Object apply(@Nullable final RowField input) {
                        return input == null ? null : input.getName();
                    }
                }
                )
            )
        );
        // Defensive immutable snapshot of the caller's list.
        this.fields = ImmutableList.copyOf(fields);
    }
    /**
     * Create a new Row Type.
     *
     * @param types The types to create can not be empty
     * @param names The literals to use. Can be null but if not must be the same length as types.
     * @return a new RowType
     */
    public static RowType createRowType(
        @Nonnull @NonNull final List<Type> types,
        @Nonnull @NonNull final List<String> names
    ) {
        Preconditions.checkArgument(!types.isEmpty(), "types is empty");
        final ImmutableList.Builder<RowField> builder = ImmutableList.builder();
        Preconditions.checkArgument(
            types.size() == names.size(),
            "types and names must be matched in size"
        );
        // Pair types with names positionally to form the row's fields.
        for (int i = 0; i < types.size(); i++) {
            builder.add(new RowField(types.get(i), names.get(i)));
        }
        return new RowType(builder.build());
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.ROW;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public RowType createType(@Nonnull @NonNull final List<Type> types, @Nonnull @NonNull final List<Object> literals) {
        // Each literal must be a String field name; checkType throws otherwise.
        final ImmutableList.Builder<String> builder = ImmutableList.builder();
        for (final Object literal : literals) {
            builder.add(TypeUtils.checkType(literal, String.class, "literal"));
        }
        return RowType.createRowType(types, builder.build());
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        // The row's type parameters are simply the types of its fields, in order.
        final ImmutableList.Builder<Type> result = ImmutableList.builder();
        for (final RowField field : this.fields) {
            result.add(field.getType());
        }
        return result.build();
    }
    /**
     * Row field.
     */
    // Immutable (type, name) pair describing one field of the row.
    @Getter
    @EqualsAndHashCode
    public static class RowField {
        private final Type type;
        private final String name;
        /**
         * constructor.
         *
         * @param type type
         * @param name name
         */
        public RowField(@Nonnull @NonNull final Type type, @Nonnull @NonNull final String name) {
            this.type = type;
            this.name = name;
        }
    }
}
| 9,728 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/TypeRegistry.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* Type mapping between canonical and connector types.
*
* @author zhenl
*/
public final class TypeRegistry implements TypeManager {
//initailzed during class loading
private static final TypeRegistry INSTANCE = new TypeRegistry();
private final ConcurrentMap<TypeSignature, Type> types = new ConcurrentHashMap<>();
private final ConcurrentMap<TypeEnum, ParametricType> parametricTypes = new ConcurrentHashMap<>();
/**
* Constructor.
*/
private TypeRegistry() {
Preconditions.checkNotNull(types, "types is null");
addType(BaseType.UNKNOWN);
addType(BaseType.BIGINT);
addType(BaseType.BOOLEAN);
addType(BaseType.FLOAT);
addType(BaseType.DOUBLE);
addType(BaseType.DATE);
addType(BaseType.INT);
addType(BaseType.SMALLINT);
addType(BaseType.TINYINT);
addType(BaseType.JSON);
addType(BaseType.TIME);
addType(BaseType.TIME_WITH_TIME_ZONE);
addType(BaseType.INTERVAL_DAY_TO_SECOND);
addType(BaseType.INTERVAL_YEAR_TO_MONTH);
addType(BaseType.STRING);
addType(BaseType.TIMESTAMP);
addType(BaseType.TIMESTAMP_WITH_TIME_ZONE);
addParametricType(DecimalType.DECIMAL);
addParametricType(CharType.CHAR);
addParametricType(MapType.MAP);
addParametricType(RowType.ROW);
addParametricType(ArrayType.ARRAY);
addParametricType(VarbinaryType.VARBINARY);
addParametricType(VarcharType.VARCHAR);
}
public static TypeRegistry getTypeRegistry() {
return INSTANCE;
}
/**
* Verify type class isn't null.
*
* @param type parameter
*/
public static void verifyTypeClass(final Type type) {
Preconditions.checkNotNull(type, "type is null");
}
/**
* {@inheritDoc}
*/
@Override
public Type getType(final TypeSignature signature) {
final Type type = types.get(signature);
if (type == null) {
return instantiateParametricType(signature);
}
return type;
}
/**
* {@inheritDoc}
*/
@Override
public Type getParameterizedType(final TypeEnum baseType,
final List<TypeSignature> typeParameters,
final List<Object> literalParameters) {
return getType(new TypeSignature(baseType, typeParameters, literalParameters));
}
private Type instantiateParametricType(final TypeSignature signature) {
final ImmutableList.Builder<Type> parameterTypes = ImmutableList.builder();
for (TypeSignature parameter : signature.getParameters()) {
final Type parameterType = getType(parameter);
if (parameterType == null) {
return null;
}
parameterTypes.add(parameterType);
}
final ParametricType parametricType = parametricTypes.get(signature.getBase());
if (parametricType == null) {
return null;
}
final Type instantiatedType = parametricType.createType(parameterTypes.build(),
signature.getLiteralParameters());
Preconditions.checkState(instantiatedType.getTypeSignature().equals(signature),
"Instantiated parametric type name (%s) does not match expected name (%s)",
instantiatedType, signature);
return instantiatedType;
}
/**
* Add valid type to registry.
*
* @param type type
*/
public void addType(final Type type) {
verifyTypeClass(type);
final Type existingType = types.putIfAbsent(type.getTypeSignature(), type);
Preconditions.checkState(existingType == null
|| existingType.equals(type), "Type %s is already registered", type);
}
/**
* Add complex type to regiestry.
*
* @param parametricType Type
*/
public void addParametricType(final ParametricType parametricType) {
final TypeEnum baseType = parametricType.getBaseType();
Preconditions.checkArgument(!parametricTypes.containsKey(baseType),
"Parametric type already registered: %s", baseType);
parametricTypes.putIfAbsent(baseType, parametricType);
}
/**
* {@inheritDoc}
*/
@Override
public List<Type> getTypes() {
return null;
}
}
| 9,729 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/CharType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.collect.ImmutableList;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Char type class.
 *
 * @author zhenl
 */
@Getter
@EqualsAndHashCode(callSuper = true)
public class CharType extends AbstractType implements ParametricType {
    /**
     * Default character type of length one.
     */
    public static final CharType CHAR = new CharType(1);
    private final int length;

    /**
     * Constructor.
     *
     * @param length length of the type; must be non-negative
     */
    public CharType(final int length) {
        super(
            new TypeSignature(
                TypeEnum.CHAR,
                new ArrayList<TypeSignature>(),
                Collections.<Object>singletonList((long) length)));
        if (length < 0) {
            // Bug fix: message previously said "VARCHAR" — a copy-paste from VarcharType.
            throw new IllegalArgumentException("Invalid CHAR length " + length);
        }
        this.length = length;
    }

    /**
     * Creates the character type.
     *
     * @param length length of the type
     * @return CharType
     */
    public static CharType createCharType(final int length) {
        return new CharType(length);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.CHAR;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Type createType(final List<Type> types, final List<Object> literals) {
        if (literals.isEmpty()) {
            // No explicit length given: default to CHAR(1).
            return createCharType(1);
        }
        if (literals.size() != 1) {
            throw new IllegalArgumentException("Expected at most one parameter for CHAR");
        }
        try {
            return createCharType(Integer.parseInt(String.valueOf(literals.get(0))));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("CHAR length must be a number");
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        return ImmutableList.of();
    }
}
| 9,730 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/VarcharType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import lombok.Getter;
import java.util.ArrayList;
import java.util.List;
/**
 * Varchar type.
 *
 * @author zhenl
 */
@Getter
public final class VarcharType extends AbstractType implements ParametricType {
    /**
     * Default varchar type of length one.
     */
    public static final VarcharType VARCHAR = new VarcharType(1);
    private final int length;

    private VarcharType(final int length) {
        super(
            new TypeSignature(
                TypeEnum.VARCHAR, new ArrayList<TypeSignature>(),
                Lists.<Object>newArrayList((long) length)));
        if (length < 0) {
            throw new IllegalArgumentException("Invalid VARCHAR length " + length);
        }
        this.length = length;
    }

    /**
     * Creates a varchar type of the given length.
     *
     * @param length the maximum character length
     * @return VarcharType
     */
    public static VarcharType createVarcharType(final int length) {
        return new VarcharType(length);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.VARCHAR;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        return ImmutableList.of();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Type createType(final List<Type> types, final List<Object> literals) {
        final int literalCount = literals.size();
        if (literalCount == 0) {
            // No explicit length: fall back to the default length of one.
            return createVarcharType(1);
        }
        if (literalCount > 1) {
            throw new IllegalArgumentException("Expected at most one parameter for VARCHAR");
        }
        final String lengthLiteral = String.valueOf(literals.get(0));
        try {
            return createVarcharType(Integer.parseInt(lengthLiteral));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("VARCHAR length must be a number");
        }
    }
}
| 9,731 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/MapType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.util.List;
/**
 * Map type class.
 *
 * @author zhenl
 */
@Getter
@EqualsAndHashCode(callSuper = true)
public class MapType extends AbstractType implements ParametricType {
    /**
     * Default map type with unknown key and value types.
     */
    public static final MapType MAP = new MapType(BaseType.UNKNOWN, BaseType.UNKNOWN);
    private final Type keyType;
    private final Type valueType;

    /**
     * Constructor.
     *
     * @param keyType the type of the map keys
     * @param valueType the type of the map values
     */
    public MapType(final Type keyType, final Type valueType) {
        super(TypeUtils.parameterizedTypeSignature(TypeEnum.MAP,
            keyType.getTypeSignature(), valueType.getTypeSignature()));
        this.keyType = keyType;
        this.valueType = valueType;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getDisplayName() {
        final StringBuilder display = new StringBuilder("map<");
        display.append(this.keyType.getDisplayName());
        display.append(", ");
        display.append(this.valueType.getDisplayName());
        display.append('>');
        return display.toString();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        return ImmutableList.of(this.keyType, this.valueType);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.MAP;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Type createType(final List<Type> types, final List<Object> literals) {
        Preconditions.checkArgument(types.size() == 2, "Expected two types");
        Preconditions.checkArgument(literals.isEmpty(), "Unexpected literals: %s", literals);
        final Type key = types.get(0);
        final Type value = types.get(1);
        return new MapType(key, value);
    }
}
| 9,732 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/VarbinaryType.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.common.type;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import lombok.Getter;
import java.util.ArrayList;
import java.util.List;
/**
 * VarbinaryType type: a variable-length binary type with a maximum length,
 * modeled as a parametric type.
 *
 * @author zhenl
 */
@Getter
public final class VarbinaryType extends AbstractType implements ParametricType {
    /**
     * Default VarbinaryType type (maximum possible length).
     */
    public static final VarbinaryType VARBINARY = new VarbinaryType(Integer.MAX_VALUE);
    // Maximum length encoded in the type signature as a Long literal parameter.
    private final int length;

    /**
     * Constructor.
     *
     * @param length maximum length; must be non-negative
     */
    private VarbinaryType(final int length) {
        super(new TypeSignature(
            TypeEnum.VARBINARY,
            new ArrayList<TypeSignature>(),
            Lists.<Object>newArrayList((long) length)));
        if (length < 0) {
            throw new IllegalArgumentException("Invalid VARBINARY length " + length);
        }
        this.length = length;
    }

    /**
     * Creates VarbinaryType.
     *
     * @param length length
     * @return VarbinaryType
     */
    public static VarbinaryType createVarbinaryType(final int length) {
        return new VarbinaryType(length);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.VARBINARY;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        return ImmutableList.of();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Type createType(final List<Type> types, final List<Object> literals) {
        if (literals.isEmpty()) {
            // No explicit length given: fall back to the unbounded default.
            return createVarbinaryType(Integer.MAX_VALUE);
        }
        if (literals.size() != 1) {
            throw new IllegalArgumentException("Expected at most one parameter for VARBINARY");
        }
        try {
            return createVarbinaryType(Integer.parseInt(String.valueOf(literals.get(0))));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("VARBINARY length must be a number");
        }
    }
}
| 9,733 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/BaseType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import lombok.EqualsAndHashCode;
/**
 * BaseType exposes the canonical non-parametric types as shared constants, each
 * backed by a simple {@link TypeSignature} over the corresponding {@link TypeEnum}.
 * (The previous javadoc referred to "TypeEnum"; this class is BaseType.)
 *
 * @author zhenl
 */
@EqualsAndHashCode(callSuper = true)
public class BaseType extends AbstractType {
    /**
     * BOOLEAN type.
     */
    public static final Type BOOLEAN = createBaseType(TypeEnum.BOOLEAN);
    /**
     * TINYINT type.
     */
    public static final Type TINYINT = createBaseType(TypeEnum.TINYINT);
    /**
     * SMALLINT type.
     */
    public static final Type SMALLINT = createBaseType(TypeEnum.SMALLINT);
    /**
     * INT type.
     */
    public static final Type INT = createBaseType(TypeEnum.INT);
    /**
     * BIGINT type.
     */
    public static final Type BIGINT = createBaseType(TypeEnum.BIGINT);
    /**
     * FLOAT type.
     */
    public static final Type FLOAT = createBaseType(TypeEnum.FLOAT);
    /**
     * DOUBLE type.
     */
    public static final Type DOUBLE = createBaseType(TypeEnum.DOUBLE);
    /**
     * STRING type.
     */
    public static final Type STRING = createBaseType(TypeEnum.STRING);
    /**
     * JSON type.
     */
    public static final Type JSON = createBaseType(TypeEnum.JSON);
    /**
     * DATE type.
     */
    public static final Type DATE = createBaseType(TypeEnum.DATE);
    /**
     * TIME type.
     */
    public static final Type TIME = createBaseType(TypeEnum.TIME);
    /**
     * TIME_WITH_TIME_ZONE type.
     */
    public static final Type TIME_WITH_TIME_ZONE = createBaseType(TypeEnum.TIME_WITH_TIME_ZONE);
    /**
     * TIMESTAMP type.
     */
    public static final Type TIMESTAMP = createBaseType(TypeEnum.TIMESTAMP);
    /**
     * TIMESTAMP_WITH_TIME_ZONE type.
     */
    public static final Type TIMESTAMP_WITH_TIME_ZONE = createBaseType(TypeEnum.TIMESTAMP_WITH_TIME_ZONE);
    /**
     * INTERVAL_YEAR_TO_MONTH type.
     */
    public static final Type INTERVAL_YEAR_TO_MONTH = createBaseType(TypeEnum.INTERVAL_YEAR_TO_MONTH);
    /**
     * INTERVAL_DAY_TO_SECOND type.
     */
    public static final Type INTERVAL_DAY_TO_SECOND = createBaseType(TypeEnum.INTERVAL_DAY_TO_SECOND);
    /**
     * UNKNOWN type, used when a type cannot be resolved.
     */
    public static final Type UNKNOWN = createBaseType(TypeEnum.UNKNOWN);

    /**
     * BaseType constructor.
     *
     * @param signature base type
     */
    public BaseType(final TypeSignature signature) {
        super(signature);
    }

    // Helper to build a parameterless signature for the given base type enum.
    private static BaseType createBaseType(final TypeEnum baseType) {
        return new BaseType(new TypeSignature(baseType));
    }
}
| 9,734 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/TypeSignature.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonValue;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Type signature class.
 *
 * <p>A signature is the textual identity of a type: a base {@link TypeEnum}, optional
 * nested type parameters (e.g. {@code map<string,int>}) and optional literal parameters
 * of type String or Long (e.g. {@code decimal(10,2)}). Instances are immutable.
 *
 * @author zhenl
 */
@Getter
@EqualsAndHashCode
public class TypeSignature {
    // The base type, e.g. MAP for "map<string,int>".
    protected final TypeEnum base;
    // Nested type parameters, e.g. the key and value signatures of a map. Unmodifiable.
    protected final List<TypeSignature> parameters;
    // Literal parameters (String or Long only), e.g. precision and scale. Immutable.
    protected final List<Object> literalParameters;

    /**
     * Type signature constructor for a base type with no parameters.
     *
     * @param base base type
     */
    public TypeSignature(@Nonnull @NonNull final TypeEnum base) {
        this.base = base;
        this.parameters = Lists.newArrayList();
        this.literalParameters = Lists.newArrayList();
    }

    /**
     * Type signature constructor.
     *
     * @param base base type
     * @param parameters type parameter
     * @param literalParameters literal parameter; may be null, and each element must be
     *                          either a String or a Long
     */
    public TypeSignature(
        @Nonnull @NonNull final TypeEnum base,
        @Nonnull @NonNull final List<TypeSignature> parameters,
        @Nullable final List<Object> literalParameters
    ) {
        if (literalParameters != null) {
            // Only String and Long literals can be rendered in a signature string.
            for (final Object literal : literalParameters) {
                Preconditions.checkArgument(
                    literal instanceof String || literal instanceof Long,
                    "Unsupported literal type: %s", literal.getClass());
            }
            this.literalParameters = ImmutableList.copyOf(literalParameters);
        } else {
            this.literalParameters = ImmutableList.copyOf(Lists.newArrayList());
        }
        this.base = base;
        // Defensive copy so later mutation of the caller's list cannot leak in.
        this.parameters = Collections.unmodifiableList(new ArrayList<>(parameters));
    }

    /**
     * Type signature constructor resolving the base type from its name.
     *
     * @param base base type name
     * @param parameters type parameter
     * @param literalParameters literal parameter
     */
    private TypeSignature(
        @Nonnull @NonNull final String base,
        @Nonnull @NonNull final List<TypeSignature> parameters,
        @Nullable final List<Object> literalParameters
    ) {
        this(TypeEnum.fromName(base), parameters, literalParameters);
    }

    /**
     * Parse Type Signature.
     *
     * <p>Accepts the forms {@code base}, {@code base<p1,p2,...>}, {@code base(l1,l2,...)}
     * and {@code base<...>(...)}. Angle brackets may nest (e.g.
     * {@code map<string,array<int>>}); the literal list, when present, must be the
     * trailing portion of the string.
     *
     * @param signature signature string
     * @return TypeSignature
     */
    @JsonCreator
    public static TypeSignature parseTypeSignature(final String signature) {
        // Fast path: a bare base name with no parameters of either kind.
        if (!signature.contains("<") && !signature.contains("(")) {
            return new TypeSignature(signature, new ArrayList<TypeSignature>(), new ArrayList<>());
        }
        String baseName = null;
        final List<TypeSignature> parameters = new ArrayList<>();
        final List<Object> literalParameters = new ArrayList<>();
        // Start index of the parameter token currently being scanned; -1 while none is open.
        int parameterStart = -1;
        // Current nesting depth of '<...>' pairs.
        int bracketCount = 0;
        // True while scanning inside the '(...)' literal parameter list.
        boolean inLiteralParameters = false;
        for (int i = 0; i < signature.length(); i++) {
            final char c = signature.charAt(i);
            if (c == '<') {
                if (bracketCount == 0) {
                    // Outermost '<': everything before it is the base name.
                    Preconditions.checkArgument(baseName == null, "Expected baseName to be null");
                    Preconditions.checkArgument(parameterStart == -1, "Expected parameter start to be -1");
                    baseName = signature.substring(0, i);
                    parameterStart = i + 1;
                }
                bracketCount++;
            } else if (c == '>') {
                bracketCount--;
                Preconditions.checkArgument(bracketCount >= 0, "Bad type signature: '%s'", signature);
                if (bracketCount == 0) {
                    // Outermost '>': the last type parameter ends here; parse it recursively.
                    Preconditions.checkArgument(parameterStart >= 0, "Bad type signature: '%s'", signature);
                    parameters.add(parseTypeSignature(signature.substring(parameterStart, i)));
                    parameterStart = i + 1;
                    if (i == signature.length() - 1) {
                        // Signature ends with '>': no literal list follows.
                        return new TypeSignature(baseName, parameters, literalParameters);
                    }
                }
            } else if (c == ',') {
                if (bracketCount == 1 && !inLiteralParameters) {
                    // Separator between top-level type parameters inside '<...>'.
                    Preconditions.checkArgument(parameterStart >= 0, "Bad type signature: '%s'", signature);
                    parameters.add(parseTypeSignature(signature.substring(parameterStart, i)));
                    parameterStart = i + 1;
                } else if (bracketCount == 0 && inLiteralParameters) {
                    // Separator between literal parameters inside '(...)'.
                    Preconditions.checkArgument(parameterStart >= 0, "Bad type signature: '%s'", signature);
                    literalParameters.add(parseLiteral(signature.substring(parameterStart, i)));
                    parameterStart = i + 1;
                }
                // Commas at deeper nesting belong to a nested signature and are skipped here.
            } else if (c == '(') {
                // Enter the literal parameter list; nested '(' is not allowed.
                Preconditions.checkArgument(!inLiteralParameters, "Bad type signature: '%s'", signature);
                inLiteralParameters = true;
                if (bracketCount == 0) {
                    if (baseName == null) {
                        // No '<...>' section: everything before '(' is the base name.
                        Preconditions.checkArgument(parameters.isEmpty(), "Expected no parameters");
                        Preconditions.checkArgument(parameterStart == -1, "Expected parameter start to be -1");
                        baseName = signature.substring(0, i);
                    }
                    parameterStart = i + 1;
                }
            } else if (c == ')') {
                Preconditions.checkArgument(inLiteralParameters, "Bad type signature: '%s'", signature);
                inLiteralParameters = false;
                if (bracketCount == 0) {
                    // ')' must close the signature; parse the final literal and finish.
                    Preconditions.checkArgument(i == signature.length() - 1, "Bad type signature: '%s'", signature);
                    Preconditions.checkArgument(parameterStart >= 0, "Bad type signature: '%s'", signature);
                    literalParameters.add(parseLiteral(signature.substring(parameterStart, i)));
                    return new TypeSignature(baseName, parameters, literalParameters);
                }
            }
        }
        // Falling out of the loop means the signature never terminated properly.
        throw new IllegalArgumentException(String.format("Bad type signature: '%s'", signature));
    }

    // Single-quoted tokens are String literals (quotes stripped); anything else must
    // parse as a Long. A token quoted on only one side is rejected.
    private static Object parseLiteral(final String literal) {
        if (literal.startsWith("'") || literal.endsWith("'")) {
            Preconditions.checkArgument(literal.startsWith("'") && literal.endsWith("'"), "Bad literal: '%s'", literal);
            return literal.substring(1, literal.length() - 1);
        } else {
            return Long.parseLong(literal);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>Renders the canonical string form consumed by {@link #parseTypeSignature},
     * e.g. {@code map<string,int>} or {@code decimal(10,2)}; String literals are
     * wrapped in single quotes.
     */
    @Override
    @JsonValue
    public String toString() {
        final StringBuilder typeName = new StringBuilder(base.getType());
        if (!parameters.isEmpty()) {
            typeName.append("<");
            boolean first = true;
            for (TypeSignature parameter : parameters) {
                if (!first) {
                    typeName.append(",");
                }
                first = false;
                typeName.append(parameter.toString());
            }
            typeName.append(">");
        }
        if (!literalParameters.isEmpty()) {
            typeName.append("(");
            boolean first = true;
            for (Object parameter : literalParameters) {
                if (!first) {
                    typeName.append(",");
                }
                first = false;
                if (parameter instanceof String) {
                    typeName.append("'").append(parameter).append("'");
                } else {
                    typeName.append(parameter.toString());
                }
            }
            typeName.append(")");
        }
        return typeName.toString();
    }
}
| 9,735 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/ArrayType.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.type;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import lombok.Getter;
import java.util.List;
/**
 * Array type class.
 *
 * @author zhenl
 */
@Getter
public class ArrayType extends AbstractType implements ParametricType {
    /**
     * Default array type over the unknown element type.
     */
    static final ArrayType ARRAY = new ArrayType(BaseType.UNKNOWN);
    private final Type elementType;

    /**
     * Constructor.
     *
     * @param elementType the type of the array elements
     */
    public ArrayType(final Type elementType) {
        super(TypeUtils.parameterizedTypeSignature(TypeEnum.ARRAY, elementType.getTypeSignature()));
        Preconditions.checkNotNull(elementType, "elementType is null");
        this.elementType = elementType;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeEnum getBaseType() {
        return TypeEnum.ARRAY;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Type> getParameters() {
        return ImmutableList.of(this.elementType);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Type createType(final List<Type> types, final List<Object> literals) {
        Preconditions.checkArgument(types.size() == 1, "Expected only one type, got %s", types);
        Preconditions.checkArgument(literals.isEmpty(), "Unexpected literals: %s", literals);
        return new ArrayType(types.get(0));
    }
}
| 9,736 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/type/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Canonical type classes.
*
* @author zhenl
*/
package com.netflix.metacat.common.type;
| 9,737 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatUserMetadataException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
 * Exception from user metadata service.
 * TODO: This should be replaced by a BadRequestException from JAX-RS 2.x once we support the newer JAX-RS version.
 */
public class MetacatUserMetadataException extends MetacatException {
    /**
     * Constructor.
     *
     * @param message exception message describing the user metadata failure
     */
    public MetacatUserMetadataException(final String message) {
        super(message);
    }
    // NOTE(review): consider adding a (message, cause) overload so the root cause is
    // preserved — requires a matching constructor on MetacatException; verify first.
}
| 9,738 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatTooManyRequestsException.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
 * Exception for too many RDS connections or pool empty.
 *
 * <p>NOTE(review): the name suggests this maps to an HTTP 429-style response at the
 * API boundary — confirm against the service's exception mapper.
 *
 * @author zhenl
 */
public class MetacatTooManyRequestsException extends MetacatException {
    /**
     * Constructor.
     *
     * @param reason exception message describing why the request was rejected
     */
    public MetacatTooManyRequestsException(final String reason) {
        super(reason);
    }
}
| 9,739 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatPreconditionFailedException.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
 * Exception when operation failed precondition. Ex. if a locked table is updated, this exception will be thrown.
 *
 * @since 1.2.0
 * @author amajumdar
 */
public class MetacatPreconditionFailedException extends MetacatException {
    /**
     * Constructor.
     *
     * @param reason exception message describing the failed precondition
     */
    public MetacatPreconditionFailedException(final String reason) {
        super(reason);
    }
}
| 9,740 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotSupportedException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
* Metacat not supported exception.
*/
public class MetacatNotSupportedException extends MetacatException {
    /**
     * Constructor.
     *
     * @param message exception message
     */
    public MetacatNotSupportedException(final String message) {
        super(message);
    }
    /**
     * Constructor that preserves the underlying cause so callers can wrap a
     * lower-level failure without losing the original stack trace.
     *
     * @param message exception message
     * @param cause the root cause to propagate
     */
    public MetacatNotSupportedException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 9,741 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotFoundException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
* TODO: This should be replaced by a NotFoundException from JAX-RS 2.x once we support the newer JAX-RS version.
*/
public class MetacatNotFoundException extends MetacatException {
    /**
     * Constructor.
     *
     * @param message exception message
     */
    public MetacatNotFoundException(final String message) {
        super(message);
    }
    /**
     * Constructor that preserves the underlying cause so callers can wrap a
     * lower-level failure without losing the original stack trace.
     *
     * @param message exception message
     * @param cause the root cause to propagate
     */
    public MetacatNotFoundException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 9,742 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
* Base exception for Metacat errors exposed externally.
*
* @author amajumdar
* @author tgianos
*/
public class MetacatException extends RuntimeException {
    /**
     * Constructor.
     */
    public MetacatException() {
        super();
    }
    /**
     * Constructor.
     *
     * @param msg The error message to pass along
     */
    public MetacatException(final String msg) {
        super(msg);
    }
    /**
     * Constructor.
     *
     * @param msg The error message to pass along
     * @param cause The cause of the error
     */
    public MetacatException(final String msg, final Throwable cause) {
        super(msg, cause);
    }
    /**
     * Constructor with only a root cause, for parity with
     * {@link RuntimeException#RuntimeException(Throwable)}. The message defaults
     * to the cause's {@code toString()}.
     *
     * @param cause The cause of the error
     */
    public MetacatException(final Throwable cause) {
        super(cause);
    }
}
| 9,743 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatBadRequestException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
* TODO: This should be replaced by a BadRequestException from JAX-RS 2.x once we support the newer JAX-RS version.
*/
public class MetacatBadRequestException extends MetacatException {
    /**
     * Constructor.
     *
     * @param reason exception message
     */
    public MetacatBadRequestException(final String reason) {
        super(reason);
    }
    /**
     * Constructor that preserves the underlying cause so callers can wrap a
     * lower-level failure without losing the original stack trace.
     *
     * @param reason exception message
     * @param cause the root cause to propagate
     */
    public MetacatBadRequestException(final String reason, final Throwable cause) {
        super(reason, cause);
    }
}
| 9,744 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatAlreadyExistsException.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
* Metacat exception for already exists entities.
*/
public class MetacatAlreadyExistsException extends MetacatException {
    /**
     * Constructor.
     *
     * @param message exception message
     */
    public MetacatAlreadyExistsException(final String message) {
        super(message);
    }
    /**
     * Constructor that preserves the underlying cause so callers can wrap a
     * lower-level failure without losing the original stack trace.
     *
     * @param message exception message
     * @param cause the root cause to propagate
     */
    public MetacatAlreadyExistsException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 9,745 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatUnAuthorizedException.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.exception;
/**
* MetacatUnAuthorizedException Exception.
* @author zhenl
* @since 1.2.0
*/
public class MetacatUnAuthorizedException extends MetacatException {
    /**
     * Constructor.
     *
     * @param message message
     */
    public MetacatUnAuthorizedException(final String message) {
        super(message);
    }
    /**
     * Constructor that preserves the underlying cause so callers can wrap a
     * lower-level failure without losing the original stack trace.
     *
     * @param message message
     * @param cause the root cause to propagate
     */
    public MetacatUnAuthorizedException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 9,746 |
0 |
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common/src/main/java/com/netflix/metacat/common/exception/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Package containing the Metacat exceptions.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.exception;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,747 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/AbstractThriftServer.java
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.RegistryUtil;
import com.netflix.spectator.api.Registry;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.thrift.TProcessor;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TServerEventHandler;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TServerTransport;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Base implementation for thrift server.
*/
@Slf4j
public abstract class AbstractThriftServer {
    protected final Config config;
    protected final Registry registry;
    @Getter
    private final int portNumber;
    // Name format handed to the thread factory; also used as the pool's metric name.
    private final String threadPoolNameFormat;
    // Flipped to true by stop(); the serving loop checks it to distinguish an intentional
    // shutdown from an unexpected serve() failure (which triggers a restart).
    private final AtomicBoolean stopping = new AtomicBoolean(false);
    // Counts serving threads so each restart gets a uniquely numbered thread name.
    private final AtomicInteger serverThreadCount = new AtomicInteger(0);
    @Getter
    private TServer server;
    /**
     * Constructor.
     *
     * @param config server configuration
     * @param registry spectator registry used for metrics
     * @param portNumber port the thrift server listens on
     * @param threadPoolNameFormat thread name format for the worker pool
     */
    protected AbstractThriftServer(
        @NonNull final Config config,
        @NonNull final Registry registry,
        final int portNumber,
        @NonNull final String threadPoolNameFormat
    ) {
        this.config = config;
        this.registry = registry;
        this.portNumber = portNumber;
        this.threadPoolNameFormat = threadPoolNameFormat;
    }
    /**
     * Returns the thrift processor.
     *
     * @return thrift processor
     */
    public abstract TProcessor getProcessor();
    /**
     * Returns the server event handler.
     *
     * @return server event handler
     */
    public abstract TServerEventHandler getServerEventHandler();
    /**
     * Returns the server name.
     *
     * @return server name
     */
    public abstract String getServerName();
    /**
     * Returns true, if the server event handler exists.
     *
     * @return true, if the server event handler exists
     */
    public abstract boolean hasServerEventHandler();
    /**
     * Server initialization.
     *
     * @throws Exception error
     */
    public void start() throws Exception {
        log.info("initializing thrift server {}", getServerName());
        final ThreadFactory threadFactory = new ThreadFactoryBuilder()
            .setNameFormat(threadPoolNameFormat)
            .setUncaughtExceptionHandler((t, e) -> log.error("Uncaught exception in thread: {}", t.getName(), e))
            .build();
        // SynchronousQueue means requests beyond the max worker count are rejected
        // immediately rather than queued, which surfaces pool exhaustion quickly.
        final ExecutorService executorService = new ThreadPoolExecutor(
            Math.min(2, config.getThriftServerMaxWorkerThreads()),
            config.getThriftServerMaxWorkerThreads(),
            60L,
            TimeUnit.SECONDS,
            new SynchronousQueue<>(),
            threadFactory
        );
        RegistryUtil.registerThreadPool(registry, threadPoolNameFormat, (ThreadPoolExecutor) executorService);
        // Config value is in seconds; TServerSocket expects milliseconds.
        final int timeout = config.getThriftServerSocketClientTimeoutInSeconds() * 1000;
        final TServerTransport serverTransport = new TServerSocket(portNumber, timeout);
        startServing(executorService, serverTransport);
    }
    // Starts (or restarts) a serving thread around TThreadPoolServer.serve(). If serve()
    // throws while we are not stopping, a replacement thread is started reusing the same
    // executor and transport; during an intentional stop the failure is ignored.
    private void startServing(final ExecutorService executorService, final TServerTransport serverTransport) {
        if (!stopping.get()) {
            final TThreadPoolServer.Args serverArgs = new TThreadPoolServer.Args(serverTransport)
                .processor(getProcessor())
                .executorService(executorService);
            server = new TThreadPoolServer(serverArgs);
            if (hasServerEventHandler()) {
                server.setServerEventHandler(getServerEventHandler());
            }
            final String threadName = getServerName() + "-thread-#" + serverThreadCount.incrementAndGet();
            new Thread(threadName) {
                @Override
                public void run() {
                    log.debug("starting serving");
                    try {
                        server.serve();
                    } catch (Throwable t) {
                        if (!stopping.get()) {
                            log.error("Unexpected exception in {}. This probably "
                                + "means that the worker pool was exhausted. "
                                + "Increase 'metacat.thrift.server_max_worker_threads' "
                                + "from {} or throttle the number of requests. "
                                + "This server thread is not in a bad state so starting a new one.",
                                getServerName(), config.getThriftServerMaxWorkerThreads(), t);
                            startServing(executorService, serverTransport);
                        } else {
                            log.debug("stopping serving");
                        }
                    }
                    log.debug("started serving");
                }
            }.start();
        }
    }
    /**
     * Server shutdown.
     *
     * @throws Exception error
     */
    public void stop() throws Exception {
        log.info("stopping thrift server {}", getServerName());
        // compareAndSet guards against double-stop; once 'stopping' is set, the serving
        // thread treats serve() returning/throwing as an expected shutdown.
        if (stopping.compareAndSet(false, true) && server != null) {
            log.debug("stopping serving");
            server.stop();
            log.debug("stopped serving");
        }
    }
}
| 9,748 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/CatalogThriftService.java
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.netflix.metacat.common.server.api.v1.MetacatV1;
import com.netflix.metacat.common.server.api.v1.PartitionV1;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.TProcessor;
import org.apache.thrift.server.TServerEventHandler;
/**
* Thrift service.
*/
public class CatalogThriftService extends AbstractThriftServer {
    private final MetacatV1 metacatV1;
    private final PartitionV1 partitionV1;
    private final HiveConverters hiveConverters;
    private final String catalogName;
    /**
     * Constructor.
     *
     * @param config config
     * @param hiveConverters converter between Hive and Metacat representations
     * @param metacatV1 Metacat V1 resource
     * @param partitionV1 Partition V1 resource
     * @param catalogName name of the catalog exposed by this server
     * @param portNumber port to listen on
     * @param registry registry for spectator
     */
    public CatalogThriftService(
        final Config config,
        final HiveConverters hiveConverters,
        final MetacatV1 metacatV1,
        final PartitionV1 partitionV1,
        final String catalogName,
        final int portNumber,
        final Registry registry
    ) {
        super(config, registry, portNumber, "thrift-pool-" + catalogName + "-" + portNumber + "-%d");
        this.metacatV1 = metacatV1;
        this.partitionV1 = partitionV1;
        this.hiveConverters = hiveConverters;
        this.catalogName = catalogName;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TProcessor getProcessor() {
        final CatalogThriftHiveMetastore handler =
            new CatalogThriftHiveMetastore(config, hiveConverters, metacatV1, partitionV1, catalogName, registry);
        return new ThriftHiveMetastore.Processor<>(handler);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public TServerEventHandler getServerEventHandler() {
        return new CatalogThriftEventHandler();
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public String getServerName() {
        final StringBuilder name = new StringBuilder("thrift server for ");
        name.append(catalogName).append(" on port ").append(this.getPortNumber());
        return name.toString();
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public boolean hasServerEventHandler() {
        return true;
    }
}
| 9,749 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/CatalogThriftHiveMetastore.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.facebook.fb303.FacebookBase;
import com.facebook.fb303.FacebookService;
import com.facebook.fb303.fb_status;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseCreateRequestDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.FieldDto;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatAlreadyExistsException;
import com.netflix.metacat.common.exception.MetacatNotFoundException;
import com.netflix.metacat.common.exception.MetacatPreconditionFailedException;
import com.netflix.metacat.common.server.api.v1.MetacatV1;
import com.netflix.metacat.common.server.api.v1.PartitionV1;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.DropPartitionsResult;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.FireEventRequest;
import org.apache.hadoop.hive.metastore.api.FireEventResponse;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse;
import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest;
import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse;
import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
import org.apache.hadoop.hive.metastore.api.TableStatsResult;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnlockRequest;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.thrift.TException;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* Metacat Hive thrift implementation. This uses the Metacat Resource classes.
*/
@Slf4j
public class CatalogThriftHiveMetastore extends FacebookBase
implements FacebookService.Iface, ThriftHiveMetastore.Iface {
    private static final Joiner AND_JOINER = Joiner.on(" and ");
    // Cache of compiled regex patterns keyed by the pattern string, so repeated
    // filter expressions are not recompiled per request.
    private static final LoadingCache<String, Pattern> PATTERNS = CacheBuilder.newBuilder()
        .build(new CacheLoader<String, Pattern>() {
            public Pattern load(
                @Nonnull final String regex) {
                return Pattern.compile(regex);
            }
        });
    // Normalized (trimmed, lower-cased) name of the catalog this metastore fronts.
    private final String catalogName;
    private final Config config;
    private final HiveConverters hiveConverters;
    private final PartitionV1 partV1;
    private final MetacatV1 v1;
    // Default privilege set granted to the "users" role ("ALL", granted by "hadoop").
    // NOTE(review): presumably a stand-in since Metacat does not manage Hive roles — confirm.
    private final Map<String, List<PrivilegeGrantInfo>> defaultRolesPrivilegeSet =
        Maps.newHashMap(ImmutableMap.of("users",
            Lists.newArrayList(new PrivilegeGrantInfo("ALL", 0, "hadoop", PrincipalType.ROLE, true))));
    private final Registry registry;
    /**
     * Constructor.
     *
     * @param config config
     * @param hiveConverters hive converter
     * @param metacatV1 Metacat V1 resource
     * @param partitionV1 Partition V1 resource
     * @param catalogName catalog name
     * @param registry registry of spectator
     */
    public CatalogThriftHiveMetastore(
        final Config config,
        final HiveConverters hiveConverters,
        final MetacatV1 metacatV1,
        final PartitionV1 partitionV1,
        final String catalogName,
        final Registry registry
    ) {
        super("CatalogThriftHiveMetastore");
        this.config = Preconditions.checkNotNull(config, "config is null");
        this.hiveConverters = Preconditions.checkNotNull(hiveConverters, "hive converters is null");
        this.v1 = Preconditions.checkNotNull(metacatV1, "metacat api is null");
        this.partV1 = Preconditions.checkNotNull(partitionV1, "partition api is null");
        // Normalize once so all subsequent lookups use the canonical catalog name.
        this.catalogName = normalizeIdentifier(Preconditions.checkNotNull(catalogName, "catalog name is required"));
        this.registry = registry;
    }
private static String normalizeIdentifier(@Nullable final String s) {
if (s == null) {
return null;
} else {
return s.trim().toLowerCase(Locale.ENGLISH);
}
}
    /**
     * {@inheritDoc}
     */
    @Override
    public void abort_txn(final AbortTxnRequest rqst) throws TException {
        // Transactions are not supported by this facade; always throws via unimplemented().
        throw unimplemented("abort_txn", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void add_dynamic_partitions(final AddDynamicPartitions rqst) throws TException {
        // Transactional dynamic partitions are not supported; always throws.
        throw unimplemented("add_dynamic_partitions", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Index add_index(final Index newIndex, final Table indexTable) throws TException {
        // Hive indexes are not supported; always throws.
        throw unimplemented("add_index", new Object[]{newIndex, indexTable});
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Partition add_partition(final Partition newPart) throws TException {
        // Delegates to the environment-context variant with no context.
        return add_partition_with_environment_context(newPart, null);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public Partition add_partition_with_environment_context(
        final Partition newPart,
        @Nullable final EnvironmentContext ec
    ) throws TException {
        final String dbName = normalizeIdentifier(newPart.getDbName());
        final String tableName = normalizeIdentifier(newPart.getTableName());
        return requestWrapper("add_partition_with_environment_context", new Object[]{dbName, tableName, ec}, () -> {
            // Metacat's save is an upsert: an existing partition is updated in place.
            addPartitionsCore(dbName, tableName, ImmutableList.of(newPart), false);
            return newPart;
        });
    }
/**
* {@inheritDoc}
*/
@Override
public int add_partitions(final List<Partition> newParts) throws TException {
if (newParts == null || newParts.size() == 0) {
return 0;
}
final String dbName = normalizeIdentifier(newParts.get(0).getDbName());
final String tableName = normalizeIdentifier(newParts.get(0).getTableName());
return requestWrapper("add_partition", new Object[]{dbName, tableName}, () -> {
addPartitionsCore(dbName, tableName, newParts, false);
return newParts.size();
});
}
/**
* {@inheritDoc}
*/
@Override
public int add_partitions_pspec(final List<PartitionSpec> newParts) throws TException {
if (newParts == null || newParts.isEmpty()) {
return 0;
}
final String dbName = newParts.get(0).getDbName();
final String tableName = newParts.get(0).getTableName();
return requestWrapper("add_partition", new Object[]{dbName, tableName}, () -> {
final PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(newParts);
final PartitionSpecProxy.PartitionIterator partitionIterator = partitionSpecProxy.getPartitionIterator();
final List<Partition> partitions = addPartitionsCore(dbName, tableName,
Lists.newArrayList(partitionIterator), false);
return partitions.size();
});
}
    /**
     * {@inheritDoc}
     */
    @Override
    public AddPartitionsResult add_partitions_req(final AddPartitionsRequest request) throws TException {
        final String dbName = normalizeIdentifier(request.getDbName());
        final String tableName = normalizeIdentifier(request.getTblName());
        return requestWrapper("add_partition", new Object[]{dbName, tableName}, () -> {
            final List<Partition> partitions = addPartitionsCore(dbName, tableName, request.getParts(),
                request.isIfNotExists());
            final AddPartitionsResult result = new AddPartitionsResult();
            result.setPartitions(partitions);
            return result;
        });
    }
    /**
     * Converts the given Hive partitions to Metacat partition DTOs and saves them to
     * the target table. The save behaves as an upsert, so {@code ifNotExists} is
     * deliberately ignored (see the debug log below).
     *
     * @param dbName normalized database name
     * @param tblName normalized table name
     * @param parts hive partitions to save
     * @param ifNotExists ignored; Metacat updates an existing partition in place
     * @return the input partitions unchanged
     * @throws TException MetaException when the table has no partition keys
     */
    private List<Partition> addPartitionsCore(final String dbName, final String tblName, final List<Partition> parts,
                                              final boolean ifNotExists)
        throws TException {
        log.debug("Ignoring {} since metacat save partitions will do an update if it already exists", ifNotExists);
        final TableDto tableDto = v1.getTable(catalogName, dbName, tblName, true, false, false);
        final List<String> partitionKeys = tableDto.getPartition_keys();
        // Adding a partition to an unpartitioned table is a client error.
        if (partitionKeys == null || partitionKeys.isEmpty()) {
            throw new MetaException("Unable to add partition to unpartitioned table: " + tableDto.getName());
        }
        final PartitionsSaveRequestDto partitionsSaveRequestDto = new PartitionsSaveRequestDto();
        final List<PartitionDto> converted = Lists.newArrayListWithCapacity(parts.size());
        for (Partition partition : parts) {
            converted.add(hiveConverters.hiveToMetacatPartition(tableDto, partition));
        }
        partitionsSaveRequestDto.setPartitions(converted);
        partV1.savePartitions(catalogName, dbName, tblName, partitionsSaveRequestDto);
        return parts;
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void alter_database(final String dbname, final Database db) throws TException {
        requestWrapper("update_database", new Object[]{db}, () -> {
            if (dbname == null || db == null) {
                throw new InvalidInputException("Invalid database request");
            }
            // Only the parameter map and the location URI are propagated to Metacat.
            v1.updateDatabase(catalogName, normalizeIdentifier(dbname),
                DatabaseCreateRequestDto.builder().metadata(db.getParameters()).uri(db.getLocationUri()).build());
            return null;
        });
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void alter_function(final String dbName, final String funcName, final Function newFunc) throws TException {
        // Hive functions are not supported by this facade; always throws.
        throw unimplemented("alter_function", new Object[]{dbName, funcName, newFunc});
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void alter_index(final String dbname, final String baseTblName, final String idxName,
                            final Index newIdx) throws TException {
        // Hive indexes are not supported by this facade; always throws.
        throw unimplemented("alter_index", new Object[]{dbname, baseTblName, idxName, newIdx});
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void alter_partition(final String dbName, final String tblName, final Partition newPart) throws TException {
        // Delegates to the environment-context variant with no context.
        alter_partition_with_environment_context(dbName, tblName, newPart, null);
    }
/**
* {@inheritDoc}
*/
@Override
public void alter_partition_with_environment_context(
final String dbName,
final String tblName,
final Partition newPart,
@Nullable final EnvironmentContext ec
) throws TException {
final String databaseName = normalizeIdentifier(dbName);
final String tableName = normalizeIdentifier(tblName);
requestWrapper("alter_partition_with_environment_context", new Object[]{databaseName, tableName, ec},
() -> {
addPartitionsCore(dbName, tableName, ImmutableList.of(newPart), false);
return null;
});
}
/**
* {@inheritDoc}
*/
@Override
public void alter_partitions(final String dbName, final String tblName, final List<Partition> newParts)
throws TException {
final String databaseName = normalizeIdentifier(dbName);
final String tableName = normalizeIdentifier(tblName);
requestWrapper("add_partition", new Object[]{databaseName, tableName}, () -> {
addPartitionsCore(dbName, tableName, newParts, false);
return null;
});
}
    /**
     * {@inheritDoc}
     */
    @Override
    public void alter_table(final String dbname, final String tblName, final Table newTbl) throws TException {
        // Delegates to the environment-context variant with no context.
        alter_table_with_environment_context(dbname, tblName, newTbl, null);
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void alter_table_with_cascade(
        final String dbname,
        final String tblName,
        final Table newTbl,
        final boolean cascade
    ) throws TException {
        // NOTE(review): the cascade flag is currently ignored — partitions are not updated.
        //TODO: Add logic to cascade the changes to the partitions
        alter_table_with_environment_context(dbname, tblName, newTbl, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Converts the Hive table to the Metacat representation and updates it. If the qualified
     * name changed, the table is renamed first and then updated under its new name. The
     * environment context is not used beyond request logging.</p>
     */
    @Override
    public void alter_table_with_environment_context(
        final String dbname,
        final String tblName,
        final Table newTbl,
        @Nullable final EnvironmentContext environmentContext
    ) throws TException {
        requestWrapper("alter_table_with_environment_context",
            new Object[]{dbname, tblName, newTbl, environmentContext}, () -> {
                final String databaseName = normalizeIdentifier(dbname);
                final String tableName = normalizeIdentifier(tblName);
                final QualifiedName oldName = QualifiedName.ofTable(catalogName, databaseName, tableName);
                final QualifiedName newName = QualifiedName
                    .ofTable(catalogName, newTbl.getDbName(), newTbl.getTableName());
                final TableDto dto = hiveConverters.hiveToMetacatTable(newName, newTbl);
                // Rename only changes the table name within the same database.
                if (!oldName.equals(newName)) {
                    v1.renameTable(catalogName, oldName.getDatabaseName(), oldName.getTableName(),
                        newName.getTableName());
                }
                // NOTE(review): the update targets the original (raw) database name; cross-database
                // moves appear unsupported here -- confirm before relying on that behavior.
                v1.updateTable(catalogName, dbname, newName.getTableName(), dto);
                return null;
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #append_partition_with_environment_context} with a null context.</p>
     */
    @Override
    public Partition append_partition(final String dbName, final String tblName, final List<String> partVals)
        throws TException {
        return append_partition_with_environment_context(dbName, tblName, partVals, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #append_partition_by_name_with_environment_context} with a null context.</p>
     */
    @Override
    public Partition append_partition_by_name(final String dbName, final String tblName, final String partName)
        throws TException {
        return append_partition_by_name_with_environment_context(dbName, tblName, partName, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Creates an empty partition with the given name and returns the stored partition.
     * The environment context is ignored.</p>
     */
    @Override
    public Partition append_partition_by_name_with_environment_context(
        final String dbName, final String tblName,
        final String partName,
        @Nullable final EnvironmentContext environmentContext
    ) throws TException {
        return requestWrapper("append_partition_by_name_with_environment_context",
            new Object[]{dbName, tblName, partName},
            () -> appendPartitionsCoreAndReturn(dbName, tblName, partName));
    }
/**
* {@inheritDoc}
*/
@Override
public Partition append_partition_with_environment_context(
final String dbName,
final String tblName,
final List<String> partVals,
@Nullable final EnvironmentContext environmentContext
) throws TException {
return requestWrapper("append_partition_by_name_with_environment_context",
new Object[]{dbName, tblName, partVals}, () -> {
final TableDto tableDto = getTableDto(dbName, tblName);
final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
appendPartitionsCore(dbName, tblName, partName);
return hiveConverters.metacatToHivePartition(getPartitionDtoByName(tableDto, partName), tableDto);
});
}
private void appendPartitionsCore(final String dbName, final String tblName, final String partName)
throws TException {
final PartitionsSaveRequestDto partitionsSaveRequestDto = new PartitionsSaveRequestDto();
final PartitionDto partitionDto = new PartitionDto();
partitionDto.setName(QualifiedName.ofPartition(catalogName, dbName, tblName, partName));
partitionDto.setSerde(new StorageDto());
partitionsSaveRequestDto.setPartitions(Lists.newArrayList(partitionDto));
partV1.savePartitions(catalogName, dbName, tblName, partitionsSaveRequestDto);
}
    // Creates the (empty) partition and reads it back as a Hive Partition object.
    private Partition appendPartitionsCoreAndReturn(final String dbName, final String tblName, final String partName)
        throws TException {
        appendPartitionsCore(dbName, tblName, partName);
        return getPartitionByName(dbName, tblName, partName);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegation tokens are not supported; always throws.</p>
     */
    @Override
    public void cancel_delegation_token(final String tokenStrForm) throws TException {
        throw unimplemented("cancel_delegation_token", new Object[]{tokenStrForm});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Locking is not supported; always throws.</p>
     */
    @Override
    public LockResponse check_lock(final CheckLockRequest rqst) throws TException {
        throw unimplemented("check_lock", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Transactions are not supported; always throws.</p>
     */
    @Override
    public void commit_txn(final CommitTxnRequest rqst) throws TException {
        throw unimplemented("commit_txn", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Compaction is not supported; always throws.</p>
     */
    @Override
    public void compact(final CompactionRequest rqst) throws TException {
        throw unimplemented("compact", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Creates the database through the Metacat API, carrying over the Hive database's
     * parameters and location URI.</p>
     */
    @Override
    public void create_database(final Database database) throws TException {
        requestWrapper("create_database", new Object[]{database}, () -> {
            final String dbName = normalizeIdentifier(database.getName());
            v1.createDatabase(catalogName, dbName,
                DatabaseCreateRequestDto.builder().metadata(database.getParameters()).uri(database.getLocationUri())
                    .build());
            return null;
        });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive functions are not supported; always throws.</p>
     */
    @Override
    public void create_function(final Function func) throws TException {
        throw unimplemented("create_function", new Object[]{func});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Role management is not supported; always throws.</p>
     */
    @Override
    public boolean create_role(final Role role) throws TException {
        throw unimplemented("create_role", new Object[]{role});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #create_table_with_environment_context} with a null context.</p>
     */
    @Override
    public void create_table(final Table tbl) throws TException {
        create_table_with_environment_context(tbl, null);
    }
/**
* {@inheritDoc}
*/
@Override
public void create_table_with_environment_context(
final Table tbl,
@Nullable final EnvironmentContext environmentContext
) throws TException {
requestWrapper("create_table_with_environment_context", new Object[]{tbl, environmentContext}, () -> {
final String dbname = normalizeIdentifier(tbl.getDbName());
final String tblName = normalizeIdentifier(tbl.getTableName());
final QualifiedName name = QualifiedName.ofTable(catalogName, dbname, tblName);
final TableDto dto = hiveConverters.hiveToMetacatTable(name, tbl);
v1.createTable(catalogName, dbname, tblName, dto);
return null;
});
}
    /**
     * {@inheritDoc}
     *
     * <p>Hive types are not supported; always throws.</p>
     */
    @Override
    public boolean create_type(final Type type) throws TException {
        throw unimplemented("create_type", new Object[]{type});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Column statistics are not supported; always throws.</p>
     */
    @Override
    public boolean delete_partition_column_statistics(final String dbName, final String tblName,
                                                      final String partName, final String colName) throws TException {
        throw unimplemented("delete_partition_column_statistics",
            new Object[]{dbName, tblName, partName, colName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Column statistics are not supported; always throws.</p>
     */
    @Override
    public boolean delete_table_column_statistics(final String dbName, final String tblName, final String colName)
        throws TException {
        throw unimplemented("delete_table_column_statistics", new Object[]{dbName, tblName, colName});
    }
/**
* {@inheritDoc}
*/
@Override
public void drop_database(final String name, final boolean deleteData, final boolean cascade) throws TException {
requestWrapper("drop_database", new Object[]{name}, () -> {
v1.deleteDatabase(catalogName, name);
return null;
});
}
    /**
     * {@inheritDoc}
     *
     * <p>Hive functions are not supported; always throws.</p>
     */
    @Override
    public void drop_function(final String dbName, final String funcName) throws TException {
        throw unimplemented("drop_function", new Object[]{dbName, funcName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive indexes are not supported; always throws.</p>
     */
    @Override
    public boolean drop_index_by_name(final String dbName, final String tblName, final String indexName,
                                      final boolean deleteData)
        throws TException {
        throw unimplemented("drop_index_by_name", new Object[]{dbName, tblName, indexName, deleteData});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #drop_partition_with_environment_context} with a null context.</p>
     */
    @Override
    public boolean drop_partition(final String dbName, final String tblName, final List<String> partVals,
                                  final boolean deleteData)
        throws TException {
        return drop_partition_with_environment_context(dbName, tblName, partVals, deleteData, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #drop_partition_by_name_with_environment_context} with a null context.</p>
     */
    @Override
    public boolean drop_partition_by_name(final String dbName, final String tblName, final String partName,
                                          final boolean deleteData)
        throws TException {
        return drop_partition_by_name_with_environment_context(dbName, tblName, partName, deleteData, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Deletes only the partition metadata; a {@code deleteData} request is logged and ignored
     * because Metacat never removes the underlying data.</p>
     */
    @Override
    public boolean drop_partition_by_name_with_environment_context(
        final String dbName, final String tblName,
        final String partName,
        final boolean deleteData,
        @Nullable final EnvironmentContext environmentContext
    ) throws TException {
        return requestWrapper("drop_partition_by_name_with_environment_context",
            new Object[]{dbName, tblName, partName, deleteData, environmentContext}, () -> {
                final String databaseName = normalizeIdentifier(dbName);
                final String tableName = normalizeIdentifier(tblName);
                if (deleteData) {
                    log.warn("Ignoring command to delete data for {}/{}/{}/{}",
                        catalogName, databaseName, tableName, partName);
                }
                partV1.deletePartitions(catalogName, databaseName, tableName, ImmutableList.of(partName));
                return true;
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Resolves the partition name from the partition values, then deletes only the partition
     * metadata; a {@code deleteData} request is logged and ignored.</p>
     */
    @Override
    public boolean drop_partition_with_environment_context(
        final String dbName, final String tblName,
        final List<String> partVals,
        final boolean deleteData,
        @Nullable final EnvironmentContext environmentContext
    ) throws TException {
        return requestWrapper("drop_partition_with_environment_context",
            new Object[]{dbName, tblName, partVals, deleteData, environmentContext}, () -> {
                // Look up the table first so the partition values can be mapped to a canonical name.
                final TableDto tableDto = getTableDto(dbName, tblName);
                final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
                final QualifiedName partitionName = getPartitionDtoByName(tableDto, partName).getName();
                if (deleteData) {
                    log.warn("Ignoring command to delete data for {}/{}/{}/{}",
                        catalogName, tableDto.getName().getDatabaseName(), tableDto.getName().getTableName(),
                        partitionName.getPartitionName());
                }
                partV1.deletePartitions(
                    catalogName, tableDto.getName().getDatabaseName(), tableDto.getName().getTableName(),
                    ImmutableList.of(partitionName.getPartitionName()));
                return true;
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Supports dropping by partition expressions or by explicit partition names. The matching
     * partitions are resolved first, the expected minimum count is enforced (unless
     * {@code ifExists} is set), and then the partitions are deleted through the Metacat
     * partition API. The underlying data is never deleted.</p>
     */
    @Override
    public DropPartitionsResult drop_partitions_req(final DropPartitionsRequest request) throws TException {
        return requestWrapper("drop_partitions_req",
            new Object[]{request}, () -> {
                final String databaseName = request.getDbName();
                final String tableName = request.getTblName();
                final boolean ifExists = request.isSetIfExists() && request.isIfExists();
                final boolean needResult = !request.isSetNeedResult() || request.isNeedResult();
                final List<Partition> parts = Lists.newArrayList();
                final List<String> partNames = Lists.newArrayList();
                int minCount = 0;
                final RequestPartsSpec spec = request.getParts();
                if (spec.isSetExprs()) {
                    final Table table = get_table(databaseName, tableName);
                    // Dropping by expressions.
                    for (DropPartitionsExpr expr : spec.getExprs()) {
                        ++minCount; // At least one partition per expression, if not ifExists
                        final PartitionsByExprResult partitionsByExprResult = get_partitions_by_expr(
                            new PartitionsByExprRequest(databaseName, tableName, expr.bufferForExpr()));
                        if (partitionsByExprResult.isHasUnknownPartitions()) {
                            // Expr is built by DDLSA, it should only contain part cols and simple ops
                            throw new MetaException("Unexpected unknown partitions to drop");
                        }
                        parts.addAll(partitionsByExprResult.getPartitions());
                    }
                    // Reconstruct canonical partition names from the partition key columns and values.
                    final List<String> colNames = new ArrayList<>(table.getPartitionKeys().size());
                    for (FieldSchema col : table.getPartitionKeys()) {
                        colNames.add(col.getName());
                    }
                    if (!colNames.isEmpty()) {
                        parts.forEach(
                            partition -> partNames.add(FileUtils.makePartName(colNames, partition.getValues())));
                    }
                } else if (spec.isSetNames()) {
                    // Dropping by explicit names: every requested name must exist unless ifExists.
                    partNames.addAll(spec.getNames());
                    minCount = partNames.size();
                    parts.addAll(get_partitions_by_names(databaseName, tableName, partNames));
                } else {
                    throw new MetaException("Partition spec is not set");
                }
                if ((parts.size() < minCount) && !ifExists) {
                    throw new NoSuchObjectException("Some partitions to drop are missing");
                }
                partV1.deletePartitions(catalogName, databaseName, tableName, partNames);
                final DropPartitionsResult result = new DropPartitionsResult();
                if (needResult) {
                    result.setPartitions(parts);
                }
                return result;
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Role management is not supported; always throws.</p>
     */
    @Override
    public boolean drop_role(final String roleName) throws TException {
        throw unimplemented("drop_role", new Object[]{roleName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #drop_table_with_environment_context} with a null context.</p>
     */
    @Override
    public void drop_table(final String dbname, final String name, final boolean deleteData) throws TException {
        drop_table_with_environment_context(dbname, name, deleteData, null);
    }
/**
* {@inheritDoc}
*/
@Override
public void drop_table_with_environment_context(final String dbname, final String name,
final boolean deleteData,
@Nullable final EnvironmentContext ec) throws TException {
requestWrapper("drop_table_with_environment_context", new Object[]{dbname, name, deleteData, ec}, () -> {
final String databaseName = normalizeIdentifier(dbname);
final String tableName = normalizeIdentifier(name);
if (deleteData) {
log.warn("Ignoring command to delete data for {}/{}/{}", catalogName, databaseName, tableName);
}
return v1.deleteTable(catalogName, databaseName, tableName);
});
}
    /**
     * {@inheritDoc}
     *
     * <p>Hive types are not supported; always throws.</p>
     */
    @Override
    public boolean drop_type(final String type) throws TException {
        throw unimplemented("drop_type", new Object[]{type});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Partition exchange is not supported; always throws.</p>
     */
    @Override
    public Partition exchange_partition(final Map<String, String> partitionSpecs, final String sourceDb,
                                        final String sourceTableName,
                                        final String destDb, final String destTableName) throws TException {
        throw unimplemented("exchange_partition",
            new Object[]{partitionSpecs, sourceDb, sourceTableName, destDb, destTableName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Listener events are not supported; always throws.</p>
     */
    @Override
    public FireEventResponse fire_listener_event(final FireEventRequest rqst) throws TException {
        throw unimplemented("fire_listener_event", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Profiling is not supported; returns an empty string rather than throwing.</p>
     */
    @Override
    public String getCpuProfile(final int i) throws TException {
        return "";
    }
    /**
     * {@inheritDoc}
     *
     * <p>Metastore configuration lookup is not supported; always throws.</p>
     */
    @Override
    public String getMetaConf(final String key) throws TException {
        throw unimplemented("getMetaConf", new Object[]{key});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Always reports the service as alive.</p>
     */
    @Override
    public fb_status getStatus() {
        log.info("Thrift({}): Called getStatus", catalogName);
        return fb_status.ALIVE;
    }
    /**
     * {@inheritDoc}
     *
     * <p>Returns a fixed version string advertised to Hive clients.</p>
     */
    @Override
    public String getVersion() throws TException {
        log.info("Thrift({}): Called getVersion", catalogName);
        return "3.0";
    }
    /**
     * {@inheritDoc}
     *
     * <p>Aggregate statistics are not supported; always throws.</p>
     */
    @Override
    public AggrStats get_aggr_stats_for(final PartitionsStatsRequest request) throws TException {
        throw unimplemented("get_aggr_stats_for", new Object[]{request});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Lists the database names of this catalog via the Metacat catalog API.</p>
     */
    @Override
    public List<String> get_all_databases() throws TException {
        return requestWrapper("get_all_databases", new Object[]{},
            () -> v1.getCatalog(catalogName, true, false).getDatabases());
    }
    /**
     * {@inheritDoc}
     *
     * <p>Lists the table names of the given (normalized) database.</p>
     */
    @Override
    public List<String> get_all_tables(final String dbName) throws TException {
        final String databaseName = normalizeIdentifier(dbName);
        return requestWrapper("get_all_tables", new Object[]{dbName},
            () -> v1.getDatabase(catalogName, databaseName, false, true).getTables());
    }
    /**
     * {@inheritDoc}
     *
     * <p>Configuration lookup is not supported; always throws.</p>
     */
    @Override
    public String get_config_value(final String name, final String defaultValue) throws TException {
        throw unimplemented("get_config_value", new Object[]{name, defaultValue});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Notification events are not supported; always throws.</p>
     */
    @Override
    public CurrentNotificationEventId get_current_notificationEventId() throws TException {
        throw unimplemented("get_current_notificationEventId", new Object[]{});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Fetches the database from Metacat (including its detail) and converts it to the
     * Hive {@link Database} model.</p>
     */
    @Override
    public Database get_database(final String name) throws TException {
        return requestWrapper("get_database", new Object[]{name}, () -> {
            final String databaseName = normalizeIdentifier(name);
            final DatabaseDto dto = v1.getDatabase(catalogName, databaseName, true, false);
            return hiveConverters.metacatToHiveDatabase(dto);
        });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Lists all database names and, when a pattern is given, filters them with a
     * case-insensitive regex built by replacing each {@code *} with {@code .*}.</p>
     */
    @Override
    public List<String> get_databases(final String hivePattern) throws TException {
        return requestWrapper("get_databases", new Object[]{hivePattern}, () -> {
            List<String> result = v1.getCatalog(catalogName, true, false).getDatabases();
            if (hivePattern != null) {
                // Unsure about the pattern format. I couldn't find any tests. Assuming it is regex.
                // NOTE(review): Hive patterns may also use '|' as an alternation separator -- confirm.
                final Pattern pattern = PATTERNS.getUnchecked("(?i)" + hivePattern.replaceAll("\\*", ".*"));
                result = result.stream().filter(name -> pattern.matcher(name).matches())
                    .collect(Collectors.toList());
            }
            return result;
        });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegation tokens are not supported; always throws.</p>
     */
    @Override
    public String get_delegation_token(final String tokenOwner, final String renewerKerberosPrincipalName)
        throws TException {
        throw unimplemented("get_delegation_token", new Object[]{tokenOwner, renewerKerberosPrincipalName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #get_fields_with_environment_context} with a null context.</p>
     */
    @Override
    public List<FieldSchema> get_fields(final String dbName, final String tableName) throws TException {
        return get_fields_with_environment_context(dbName, tableName, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Returns the non-partition columns from the table's storage descriptor; throws a
     * {@link MetaException} if the table or its column list cannot be resolved. The
     * environment context is not used beyond request logging.</p>
     */
    @Override
    public List<FieldSchema> get_fields_with_environment_context(
        final String dbName,
        final String tableName,
        @Nullable final EnvironmentContext environmentContext
    ) throws TException {
        return requestWrapper("get_fields_with_environment_context",
            new Object[]{dbName, tableName, environmentContext}, () -> {
                final Table table = get_table(dbName, tableName);
                if (table == null || table.getSd() == null || table.getSd().getCols() == null) {
                    throw new MetaException("Unable to get fields for " + dbName + "." + tableName);
                }
                return table.getSd().getCols();
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive functions are not supported; always throws.</p>
     */
    @Override
    public Function get_function(final String dbName, final String funcName) throws TException {
        throw unimplemented("get_function", new Object[]{dbName, funcName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive functions are not supported; returns an empty list rather than throwing so
     * that clients listing functions do not fail.</p>
     */
    @Override
    public List<String> get_functions(final String dbName, final String pattern) throws TException {
        return Collections.emptyList();
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive indexes are not supported; always throws.</p>
     */
    @Override
    public Index get_index_by_name(final String dbName, final String tblName, final String indexName)
        throws TException {
        throw unimplemented("get_index_by_name", new Object[]{dbName, tblName, indexName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive indexes are not supported; always throws.</p>
     */
    @Override
    public List<String> get_index_names(final String dbName, final String tblName, final short maxIndexes)
        throws TException {
        throw unimplemented("get_index_names", new Object[]{dbName, tblName, maxIndexes});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Hive indexes are not supported; always throws.</p>
     */
    @Override
    public List<Index> get_indexes(final String dbName, final String tblName, final short maxIndexes)
        throws TException {
        throw unimplemented("get_indexes", new Object[]{dbName, tblName, maxIndexes});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Notification events are not supported; always throws.</p>
     */
    @Override
    public NotificationEventResponse get_next_notification(final NotificationEventRequest rqst) throws TException {
        throw unimplemented("get_next_notification", new Object[]{rqst});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Transactions are not supported; always throws.</p>
     */
    @Override
    public GetOpenTxnsResponse get_open_txns() throws TException {
        throw unimplemented("get_open_txns", new Object[]{});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Transactions are not supported; always throws.</p>
     */
    @Override
    public GetOpenTxnsInfoResponse get_open_txns_info() throws TException {
        throw unimplemented("get_open_txns_info", new Object[]{});
    }
/**
* {@inheritDoc}
*/
@Override
public List<PartitionSpec> get_part_specs_by_filter(final String dbName, final String tblName,
final String filter, final int maxParts)
throws TException {
//TODO: Handle the use case of grouping
return requestWrapper("get_partitions_pspec", new Object[]{dbName, tblName, filter, maxParts}, () -> {
final String databaseName = normalizeIdentifier(dbName);
final String tableName = normalizeIdentifier(tblName);
final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(filter, null, true, false);
final List<PartitionDto> metacatPartitions = partV1.getPartitionsForRequest(catalogName, dbName, tblName,
null, null, null, maxParts, false, dto);
final List<Partition> partitions = Lists.newArrayListWithCapacity(metacatPartitions.size());
for (PartitionDto partition : metacatPartitions) {
partitions.add(hiveConverters.metacatToHivePartition(partition, tableDto));
}
final PartitionSpec pSpec = new PartitionSpec();
pSpec.setPartitionList(new PartitionListComposingSpec(partitions));
pSpec.setDbName(dbName);
pSpec.setTableName(tblName);
if (tableDto != null && tableDto.getSerde() != null) {
pSpec.setRootPath(tableDto.getSerde().getUri());
}
return Arrays.asList(pSpec);
});
}
    /**
     * {@inheritDoc}
     *
     * <p>Maps the partition values to a canonical partition name, then fetches that partition.</p>
     */
    @Override
    public Partition get_partition(final String dbName, final String tblName, final List<String> partVals)
        throws TException {
        return requestWrapper("get_partition", new Object[]{dbName, tblName, partVals}, () -> {
            final TableDto tableDto = getTableDto(dbName, tblName);
            final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
            return hiveConverters.metacatToHivePartition(getPartitionDtoByName(tableDto, partName), tableDto);
        });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Fetches a single partition identified by its canonical name.</p>
     */
    @Override
    public Partition get_partition_by_name(final String dbName, final String tblName, final String partName)
        throws TException {
        return requestWrapper("get_partition_by_name", new Object[]{dbName, tblName, partName},
            () -> getPartitionByName(dbName, tblName, partName));
    }
    // Fetches the table (with data metadata, without definition/user metadata) using normalized identifiers.
    private TableDto getTableDto(final String dbName, final String tblName) {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        return v1.getTable(catalogName, databaseName, tableName, true, false, false);
    }
    // Resolves the partition by name and converts it to the Hive Partition model.
    private Partition getPartitionByName(final String dbName, final String tblName, final String partName
    ) throws TException {
        final TableDto tableDto = getTableDto(dbName, tblName);
        return hiveConverters.metacatToHivePartition(getPartitionDtoByName(tableDto, partName), tableDto);
    }
    // Looks up exactly one partition DTO by canonical name; throws NoSuchObjectException when
    // the partition is missing or (defensively) when more than one result comes back.
    private PartitionDto getPartitionDtoByName(final TableDto tableDto, final String partName) throws TException {
        final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(null, ImmutableList.of(partName), true, false);
        final List<PartitionDto> partitionDtos = partV1.getPartitionsForRequest(
            catalogName, tableDto.getName().getDatabaseName(), tableDto.getName().getTableName(), null, null, null,
            null, false, dto);
        if (partitionDtos == null || partitionDtos.isEmpty()) {
            throw new NoSuchObjectException("Partition (" + partName + ") not found on " + tableDto.getName());
        } else if (partitionDtos.size() != 1) {
            // I don't think this is even possible
            throw new NoSuchObjectException("Partition (" + partName + ") matched extra on " + tableDto.getName());
        }
        return partitionDtos.get(0);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Column statistics are not supported; always throws.</p>
     */
    @Override
    public ColumnStatistics get_partition_column_statistics(
        final String dbName,
        final String tblName,
        final String partName,
        final String colName
    ) throws TException {
        throw unimplemented("get_partition_column_statistics", new Object[]{dbName, tblName, partName, colName});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Lists partition names; a non-positive {@code maxParts} means "no limit" (Hive clients
     * pass -1 for unlimited).</p>
     */
    @Override
    public List<String> get_partition_names(final String dbName, final String tblName, final short maxParts)
        throws TException {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
        return requestWrapper("get_partition_names", new Object[]{databaseName, tableName, maxParts}, () -> partV1
            .getPartitionKeys(catalogName, databaseName, tableName, null, null, null, null, maxValues));
    }
    /**
     * {@inheritDoc}
     *
     * <p>Lists partition names matching a partial specification: the partition values are
     * converted to a filter expression and passed to the Metacat partition-key API. A
     * non-positive {@code maxParts} means "no limit".</p>
     */
    @Override
    public List<String> get_partition_names_ps(final String dbName, final String tblName,
                                               final List<String> partVals, final short maxParts) throws TException {
        return requestWrapper("get_partition_names_ps", new Object[]{dbName, tblName, partVals, maxParts},
            () -> {
                final String databaseName = normalizeIdentifier(dbName);
                final String tableName = normalizeIdentifier(tblName);
                final String partFilter = partition_values_to_partition_filter(databaseName, tableName, partVals);
                final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
                return partV1.getPartitionKeys(catalogName, databaseName, tableName, partFilter, null, null, null,
                    maxValues);
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Records the calling user on the request context and delegates to
     * {@link #get_partition}; privilege information is not populated yet.</p>
     */
    @Override
    public Partition get_partition_with_auth(
        final String dbName,
        final String tblName,
        final List<String> partVals,
        final String userName,
        final List<String> groupNames
    ) throws TException {
        //TODO: Handle setting the privileges
        MetacatContextManager.getContext().setUserName(userName);
        return get_partition(dbName, tblName, partVals);
    }
/**
* {@inheritDoc}
*/
@Override
public List<Partition> get_partitions(final String dbName, final String tblName, final short maxParts)
throws TException {
return requestWrapper("get_partitions", new Object[]{dbName, tblName, maxParts}, () -> {
return getPartitionsByFilter(dbName, tblName, null, maxParts);
});
}
    /**
     * {@inheritDoc}
     *
     * <p>Primary path: deserializes the Kryo-encoded expression into a filter string and queries
     * Metacat with it. Fallback path (on any failure): fetches all partition names, filters them
     * client-side via {@code PartitionExpressionForMetastore}, and resolves the survivors by name.</p>
     */
    @Override
    public PartitionsByExprResult get_partitions_by_expr(final PartitionsByExprRequest req) throws TException {
        return requestWrapper("get_partitions_by_expr", new Object[]{req},
            () -> {
                try {
                    String filter = null;
                    if (req.getExpr() != null) {
                        filter = Utilities.deserializeExpressionFromKryo(req.getExpr()).getExprString();
                        if (filter == null) {
                            throw new MetaException("Failed to deserialize expression - ExprNodeDesc not present");
                        }
                    }
                    //TODO: We need to handle the case for 'hasUnknownPartitions'
                    return new PartitionsByExprResult(
                        getPartitionsByFilter(req.getDbName(), req.getTblName(), filter, req.getMaxParts()),
                        false);
                } catch (Exception e) {
                    //
                    // If there is an exception with filtering, fallback to getting all partition names and then
                    // apply the filter.
                    //
                    final List<String> partitionNames = Lists.newArrayList(
                        get_partition_names(req.getDbName(), req.getTblName(), (short) -1));
                    final Table table = get_table(req.getDbName(), req.getTblName());
                    final List<String> columnNames = new ArrayList<>();
                    final List<PrimitiveTypeInfo> typeInfos = new ArrayList<>();
                    for (FieldSchema fs : table.getPartitionKeys()) {
                        columnNames.add(fs.getName());
                        typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()));
                    }
                    // filterPartitionsByExpr prunes partitionNames in place and reports whether the
                    // expression referenced partitions it could not fully evaluate.
                    final boolean hasUnknownPartitions = new PartitionExpressionForMetastore().filterPartitionsByExpr(
                        columnNames, typeInfos, req.getExpr(), req.getDefaultPartitionName(), partitionNames);
                    return new PartitionsByExprResult(get_partitions_by_names(
                        req.getDbName(), req.getTblName(), partitionNames), hasUnknownPartitions);
                }
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Thin wrapper around {@link #getPartitionsByFilter} with request logging.</p>
     */
    @Override
    public List<Partition> get_partitions_by_filter(final String dbName, final String tblName, final String filter,
                                                    final short maxParts)
        throws TException {
        return requestWrapper("get_partitions_by_filter", new Object[]{dbName, tblName, filter, maxParts},
            () -> getPartitionsByFilter(dbName, tblName, filter, maxParts));
    }
private List<Partition> getPartitionsByFilter(final String dbName, final String tblName,
@Nullable final String filter, final short maxParts) {
final String databaseName = normalizeIdentifier(dbName);
final String tableName = normalizeIdentifier(tblName);
final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(filter, null, true, false);
final List<PartitionDto> metacatPartitions = partV1.getPartitionsForRequest(catalogName, dbName, tblName,
null, null, null, maxValues, false, dto);
final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
for (PartitionDto partition : metacatPartitions) {
result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public List<Partition> get_partitions_by_names(final String dbName, final String tblName,
final List<String> names)
throws TException {
return requestWrapper("get_partitions_by_names", new Object[]{dbName, tblName, names}, () -> {
final String databaseName = normalizeIdentifier(dbName);
final String tableName = normalizeIdentifier(tblName);
final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(null, names, true, false);
final List<PartitionDto> metacatPartitions =
partV1.getPartitionsForRequest(catalogName, databaseName, tableName, null,
null, null, null, false, dto);
final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
for (PartitionDto partition : metacatPartitions) {
result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
}
return result;
});
}
/**
* {@inheritDoc}
*/
@Override
public List<Partition> get_partitions_ps(final String dbName, final String tblName,
final List<String> partVals, final short maxParts)
throws TException {
return requestWrapper("get_partitions_ps", new Object[]{dbName, tblName, partVals, maxParts}, () -> {
final String databaseName = normalizeIdentifier(dbName);
final String tableName = normalizeIdentifier(tblName);
final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
final String partFilter = partition_values_to_partition_filter(tableDto, partVals);
final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(partFilter, null, true, false);
final List<PartitionDto> metacatPartitions = partV1.getPartitionsForRequest(catalogName, dbName, tblName,
null, null, null, maxValues, false, dto);
final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
for (PartitionDto partition : metacatPartitions) {
result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
}
return result;
});
}
    /**
     * {@inheritDoc}
     *
     * <p>Records the calling user on the request context and delegates to
     * {@link #get_partitions_ps}; privilege information is not populated yet.</p>
     */
    @Override
    public List<Partition> get_partitions_ps_with_auth(
        final String dbName, final String tblName,
        final List<String> partVals,
        final short maxParts,
        final String userName,
        final List<String> groupNames
    ) throws TException {
        //TODO: Handle setting the privileges
        MetacatContextManager.getContext().setUserName(userName);
        return get_partitions_ps(dbName, tblName, partVals, maxParts);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #get_part_specs_by_filter} with no filter.</p>
     */
    @Override
    public List<PartitionSpec> get_partitions_pspec(final String dbName, final String tblName, final int maxParts)
        throws TException {
        return get_part_specs_by_filter(dbName, tblName, null, maxParts);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Partition statistics are not supported; always throws.</p>
     */
    @Override
    public PartitionsStatsResult get_partitions_statistics_req(final PartitionsStatsRequest request) throws TException {
        throw unimplemented("get_partitions_statistics_req", new Object[]{request});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Records the calling user on the request context and delegates to
     * {@link #get_partitions}; privilege information is not populated yet.</p>
     */
    @Override
    public List<Partition> get_partitions_with_auth(
        final String dbName,
        final String tblName,
        final short maxParts,
        final String userName,
        final List<String> groupNames
    ) throws TException {
        //TODO: Handle setting the privileges
        MetacatContextManager.getContext().setUserName(userName);
        return get_partitions(dbName, tblName, maxParts);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Role management is not supported; always throws.</p>
     */
    @Override
    public GetPrincipalsInRoleResponse get_principals_in_role(final GetPrincipalsInRoleRequest request)
        throws TException {
        throw unimplemented("get_principals_in_role", new Object[]{request});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Metacat does not track fine-grained privileges, so this returns empty grant lists for
     * the user and each group, plus the configured default role privilege set.</p>
     */
    @Override
    public PrincipalPrivilegeSet get_privilege_set(final HiveObjectRef hiveObject, final String userName,
                                                   final List<String> groupNames)
        throws TException {
        MetacatContextManager.getContext().setUserName(userName);
        return requestWrapper("get_privilege_set", new Object[]{hiveObject, userName, groupNames},
            () -> {
                Map<String, List<PrivilegeGrantInfo>> groupPrivilegeSet = null;
                Map<String, List<PrivilegeGrantInfo>> userPrivilegeSet = null;
                if (groupNames != null) {
                    groupPrivilegeSet = groupNames.stream()
                        .collect(Collectors.toMap(p -> p, p -> Lists.newArrayList()));
                }
                if (userName != null) {
                    userPrivilegeSet = ImmutableMap.of(userName, Lists.newArrayList());
                }
                return new PrincipalPrivilegeSet(userPrivilegeSet,
                    groupPrivilegeSet,
                    defaultRolesPrivilegeSet);
            });
    }
    /**
     * {@inheritDoc}
     *
     * <p>Role management is not supported; always throws.</p>
     */
    @Override
    public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
        final GetRoleGrantsForPrincipalRequest request)
        throws TException {
        throw unimplemented("get_role_grants_for_principal", new Object[]{request});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Role management is not supported; always throws.</p>
     */
    @Override
    public List<String> get_role_names() throws TException {
        throw unimplemented("get_role_names", new Object[]{});
    }
    /**
     * {@inheritDoc}
     *
     * <p>Delegates to {@link #get_schema_with_environment_context} with a null context.</p>
     */
    @Override
    public List<FieldSchema> get_schema(final String dbName, final String tableName) throws TException {
        return get_schema_with_environment_context(dbName, tableName, null);
    }
    /**
     * {@inheritDoc}
     *
     * <p>Returns the full schema: the regular columns followed by the partition key columns.
     * Throws a {@link MetaException} when the table has neither. The environment context is
     * not used beyond request logging.</p>
     */
    @Override
    public List<FieldSchema> get_schema_with_environment_context(
        final String dbName,
        final String tableName,
        @Nullable final EnvironmentContext environmentContext
    ) throws TException {
        return requestWrapper("get_schema_with_environment_context",
            new Object[]{dbName, tableName, environmentContext}, () -> {
                final Table table = get_table(dbName, tableName);
                List<FieldSchema> partitionKeys = Collections.emptyList();
                List<FieldSchema> columns = Collections.emptyList();
                if (table != null && table.getSd() != null && table.getSd().getCols() != null) {
                    columns = table.getSd().getCols();
                }
                if (table != null && table.getPartitionKeys() != null) {
                    partitionKeys = table.getPartitionKeys();
                }
                if (partitionKeys.isEmpty() && columns.isEmpty()) {
                    throw new MetaException(
                        "Table does not have any partition keys or cols: " + dbName + "." + tableName);
                }
                // Hive convention: regular columns first, partition keys appended at the end.
                final List<FieldSchema> result = Lists.newArrayListWithCapacity(partitionKeys.size() + columns.size());
                result.addAll(columns);
                result.addAll(partitionKeys);
                return result;
            });
    }
/**
 * {@inheritDoc}
 *
 * <p>Fetches the table from Metacat (including metadata) and converts it to a hive table.</p>
 */
@Override
public Table get_table(final String dbname, final String tblName) throws TException {
    return requestWrapper("get_table", new Object[]{dbname, tblName},
        () -> hiveConverters.metacatToHiveTable(
            v1.getTable(catalogName, normalizeIdentifier(dbname), normalizeIdentifier(tblName),
                true, true, true)));
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public ColumnStatistics get_table_column_statistics(final String dbName, final String tblName,
    final String colName)
    throws TException {
    throw unimplemented("get_table_column_statistics", new Object[]{dbName, tblName, colName});
}
/**
 * {@inheritDoc}
 *
 * <p>Passes the filter straight through to Metacat and strips the qualified names down to bare
 * table names.</p>
 */
@Override
public List<String> get_table_names_by_filter(final String dbname, final String filter, final short maxTables)
    throws TException {
    return requestWrapper("get_table_names_by_filter", new Object[]{dbname, filter, maxTables},
        () -> v1.getTableNames(catalogName, normalizeIdentifier(dbname), filter, (int) maxTables)
            .stream()
            .map(QualifiedName::getTableName)
            .collect(Collectors.toList()));
}
/**
 * {@inheritDoc}
 *
 * <p>Looks up each requested table in Metacat and converts it; results keep the input order.</p>
 */
@Override
public List<Table> get_table_objects_by_name(final String dbname, final List<String> tblNames) throws TException {
    return requestWrapper("get_table_objects_by_name", new Object[]{dbname, tblNames}, () -> {
        final String databaseName = normalizeIdentifier(dbname);
        final List<Table> tables = Lists.newArrayListWithCapacity(tblNames.size());
        for (final String tblName : tblNames) {
            final String tableName = normalizeIdentifier(tblName);
            tables.add(hiveConverters.metacatToHiveTable(
                v1.getTable(catalogName, databaseName, tableName, true, true, true)));
        }
        return tables;
    });
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public TableStatsResult get_table_statistics_req(final TableStatsRequest request) throws TException {
    throw unimplemented("get_table_statistics_req", new Object[]{request});
}
/**
 * {@inheritDoc}
 *
 * <p>Lists the database's tables, optionally filtered by a hive glob pattern ({@code *} wildcard,
 * matched case-insensitively).</p>
 */
@Override
public List<String> get_tables(final String dbName, final String hivePattern) throws TException {
    return requestWrapper("get_tables", new Object[]{dbName, hivePattern}, () -> {
        final List<String> tableNames = v1.getDatabase(catalogName, dbName, false, true).getTables();
        if (hivePattern == null) {
            return tableNames;
        }
        // Hive patterns use '*' as a wildcard; translate to a case-insensitive regex.
        final Pattern pattern = PATTERNS.getUnchecked("(?i)" + hivePattern.replaceAll("\\*", ".*"));
        return tableNames.stream()
            .filter(name -> pattern.matcher(name).matches())
            .collect(Collectors.toList());
    });
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public Type get_type(final String name) throws TException {
    throw unimplemented("get_type", new Object[]{name});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public Map<String, Type> get_type_all(final String name) throws TException {
    throw unimplemented("get_type_all", new Object[]{name});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean grant_privileges(final PrivilegeBag privileges) throws TException {
    throw unimplemented("grant_privileges", new Object[]{privileges});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public GrantRevokePrivilegeResponse grant_revoke_privileges(final GrantRevokePrivilegeRequest request)
    throws TException {
    throw unimplemented("grant_revoke_privileges", new Object[]{request});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public GrantRevokeRoleResponse grant_revoke_role(final GrantRevokeRoleRequest request) throws TException {
    throw unimplemented("grant_revoke_role", new Object[]{request});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean grant_role(
    final String roleName,
    final String principalName,
    final PrincipalType principalType,
    final String grantor,
    final PrincipalType grantorType,
    final boolean grantOption
) throws TException {
    // Fix: grantOption was previously dropped from the logged argument list.
    throw unimplemented("grant_role",
        new Object[]{roleName, principalName, principalType, grantor, grantorType, grantOption});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public void heartbeat(final HeartbeatRequest ids) throws TException {
    throw unimplemented("heartbeat", new Object[]{ids});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public HeartbeatTxnRangeResponse heartbeat_txn_range(final HeartbeatTxnRangeRequest txns) throws TException {
    throw unimplemented("heartbeat_txn_range", new Object[]{txns});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean isPartitionMarkedForEvent(
    final String dbName,
    final String tblName,
    final Map<String, String> partVals,
    final PartitionEventType eventType
) throws TException {
    throw unimplemented("isPartitionMarkedForEvent", new Object[]{dbName, tblName, partVals, eventType});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public List<HiveObjectPrivilege> list_privileges(final String principalName, final PrincipalType principalType,
    final HiveObjectRef hiveObject) throws TException {
    throw unimplemented("list_privileges", new Object[]{principalName, principalType, hiveObject});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public List<Role> list_roles(final String principalName, final PrincipalType principalType) throws TException {
    throw unimplemented("list_roles", new Object[]{principalName, principalType});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public LockResponse lock(final LockRequest rqst) throws TException {
    throw unimplemented("lock", new Object[]{rqst});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public void markPartitionForEvent(final String dbName, final String tblName,
    final Map<String, String> partVals,
    final PartitionEventType eventType) throws TException {
    throw unimplemented("markPartitionForEvent", new Object[]{dbName, tblName, partVals, eventType});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public OpenTxnsResponse open_txns(final OpenTxnRequest rqst) throws TException {
    throw unimplemented("open_txns", new Object[]{rqst});
}
/**
 * {@inheritDoc}
 *
 * <p>Validates partition values against the configured hive partition whitelist pattern, if any;
 * with no pattern configured every value is accepted.</p>
 */
@Override
public boolean partition_name_has_valid_characters(final List<String> partVals, final boolean throwException)
    throws TException {
    return requestWrapper("partition_name_has_valid_characters", new Object[]{partVals, throwException},
        () -> {
            // A whitelist pattern may be configured; a null pattern means "accept anything".
            final String whitelist = config.getHivePartitionWhitelistPattern();
            final Pattern pattern =
                Strings.isNullOrEmpty(whitelist) ? null : PATTERNS.getUnchecked(whitelist);
            if (!throwException) {
                return MetaStoreUtils.partitionNameHasValidCharacters(partVals, pattern);
            }
            MetaStoreUtils.validatePartitionNameCharacters(partVals, pattern);
            return true;
        });
}
/**
 * {@inheritDoc}
 *
 * <p>Parses a hive partition name ({@code key1=val1/key2=val2}) into a key/value map; a null or
 * empty name yields an empty map.</p>
 */
@Override
public Map<String, String> partition_name_to_spec(final String partName) throws TException {
    return requestWrapper("partition_name_to_spec", new Object[]{partName}, () -> {
        if (Strings.isNullOrEmpty(partName)) {
            // Collections.emptyMap() is type-safe; avoids the previous unchecked EMPTY_MAP cast.
            return Collections.emptyMap();
        }
        return Warehouse.makeSpecFromName(partName);
    });
}
/**
 * {@inheritDoc}
 *
 * <p>Parses a hive partition name and returns just the values; a null or empty name yields an
 * empty list.</p>
 */
@Override
public List<String> partition_name_to_vals(final String partName) throws TException {
    return requestWrapper("partition_name_to_vals", new Object[]{partName}, () -> {
        if (Strings.isNullOrEmpty(partName)) {
            // Collections.emptyList() is type-safe; avoids the previous unchecked EMPTY_LIST cast.
            return Collections.emptyList();
        }
        final Map<String, String> spec = Warehouse.makeSpecFromName(partName);
        // Copy the values in the map's iteration order (same order the original addAll preserved).
        return Lists.newArrayList(spec.values());
    });
}
/**
 * Converts the partial specification given by the part_vals to a Metacat filter statement.
 *
 * @param dbName   the name of the database
 * @param tblName  the name of the table
 * @param partVals the partial specification values
 * @return A Metacat filter expression suitable for selecting out the partitions matching the specification
 * @throws MetaException if there are more part_vals specified than partition columns on the table.
 */
@SuppressWarnings("checkstyle:methodname")
String partition_values_to_partition_filter(final String dbName, final String tblName,
    final List<String> partVals)
    throws MetaException {
    // Resolve the table (fields only, no metadata) so the overload can map values to partition columns.
    final String databaseName = normalizeIdentifier(dbName);
    final String tableName = normalizeIdentifier(tblName);
    final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
    return partition_values_to_partition_filter(tableDto, partVals);
}
/**
 * Converts the partial specification given by the part_vals to a Metacat filter statement.
 *
 * @param tableDto the Metacat representation of the table
 * @param partVals the partial specification values, in partition-key order; empty strings are skipped
 * @return a Metacat filter expression, or null if no non-empty values were supplied
 * @throws MetaException if there are more part_vals specified than partition columns on the table.
 */
@SuppressWarnings("checkstyle:methodname")
String partition_values_to_partition_filter(final TableDto tableDto, final List<String> partVals)
    throws MetaException {
    if (partVals.size() > tableDto.getPartition_keys().size()) {
        throw new MetaException("Too many partition values for " + tableDto.getName());
    }
    final List<FieldDto> fields = tableDto.getFields();
    final List<String> partitionFilters = Lists.newArrayListWithCapacity(fields.size());
    // Walk all fields, consuming one supplied value per partition-key field encountered.
    for (int i = 0, partitionIdx = 0; i < fields.size(); i++) {
        final FieldDto fieldDto = fields.get(i);
        if (!fieldDto.isPartition_key() || partitionIdx >= partVals.size()) {
            continue;
        }
        final String partitionValueFilter = partVals.get(partitionIdx++);
        // We only filter on partitions with values
        if (!Strings.isNullOrEmpty(partitionValueFilter)) {
            // Numeric values are rendered unquoted; everything else is single-quoted.
            // NOTE(review): values containing single quotes are not escaped — confirm upstream validation.
            String filter = "(" + fieldDto.getName() + "=";
            try {
                filter += Long.parseLong(partitionValueFilter) + ")";
            } catch (NumberFormatException ignored) {
                filter += "'" + partitionValueFilter + "')";
            }
            partitionFilters.add(filter);
        }
    }
    return partitionFilters.isEmpty() ? null : AND_JOINER.join(partitionFilters);
}
/**
 * {@inheritDoc}
 *
 * <p>Implemented as a single savePartitions call that adds the renamed partition and deletes the
 * partition identified by the old partition values.</p>
 */
@Override
public void rename_partition(final String dbName, final String tblName, final List<String> partVals,
    final Partition newPart)
    throws TException {
    requestWrapper("rename_partition", new Object[]{dbName, tblName, partVals}, () -> {
        final TableDto tableDto = getTableDto(dbName, tblName);
        // Derive the old partition name from the supplied values so it can be scheduled for deletion.
        final String partName = hiveConverters.getNameFromPartVals(tableDto, partVals);
        final PartitionsSaveRequestDto partitionsSaveRequestDto = new PartitionsSaveRequestDto();
        final PartitionDto partitionDto = hiveConverters.hiveToMetacatPartition(tableDto, newPart);
        partitionsSaveRequestDto.setPartitions(Lists.newArrayList(partitionDto));
        partitionsSaveRequestDto.setPartitionIdsForDeletes(Lists.newArrayList(partName));
        partV1.savePartitions(catalogName, dbName, tblName, partitionsSaveRequestDto);
        return null;
    });
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public long renew_delegation_token(final String tokenStrForm) throws TException {
    throw unimplemented("renew_delegation_token", new Object[]{tokenStrForm});
}
/**
 * Logs the attempted call and throws {@link InvalidOperationException}.
 *
 * <p>Declared to return {@link TException} so callers can write {@code throw unimplemented(...)}
 * and keep the compiler satisfied; it never actually returns normally.</p>
 *
 * @param methodName the thrift method that was invoked
 * @param args       the arguments it was invoked with (logged only)
 * @return never returns normally
 * @throws TException always
 */
private TException unimplemented(final String methodName, final Object[] args) throws TException {
    log.info("+++ Thrift({}): Calling {}({})", catalogName, methodName, args);
    throw new InvalidOperationException("Not implemented yet: " + methodName);
}
/**
 * Executes the given supplier, translating Metacat exceptions into their thrift equivalents and
 * recording request count and latency metrics.
 *
 * @param methodName thrift method name, used for logging and metric ids
 * @param args       the call arguments (logged only)
 * @param supplier   the work to perform
 * @param <R>        the result type
 * @return the supplier's result
 * @throws TException on failure; Metacat already-exists/not-found/precondition failures map to
 *                    AlreadyExists/NoSuchObject/InvalidObject, anything else to MetaException
 */
private <R> R requestWrapper(final String methodName, final Object[] args, final ThriftSupplier<R> supplier)
    throws TException {
    final long start = registry.clock().wallTime();
    // Count every request up front; generic failures additionally bump the failure-tagged counter below.
    registry.counter(registry.createId(Metrics.CounterThrift.getMetricName() + "." + methodName)).increment();
    try {
        final MetacatRequestContext requestContext = MetacatContextManager.getContext();
        requestContext.clearTableTypeMap();
        log.info("+++ Thrift({}): Calling {}({}). Request Context: {}",
            catalogName, methodName, args, requestContext);
        return supplier.get();
    } catch (MetacatAlreadyExistsException e) {
        log.error(e.getMessage(), e);
        throw new AlreadyExistsException(e.getMessage());
    } catch (MetacatNotFoundException e) {
        log.error(e.getMessage(), e);
        throw new NoSuchObjectException(e.getMessage());
    } catch (MetacatPreconditionFailedException e) {
        log.error(e.getMessage(), e);
        throw new InvalidObjectException(e.getMessage());
    } catch (TException e) {
        // Already a thrift exception; rethrow untouched.
        // NOTE(review): TException failures do not bump the failure-tagged counter — confirm intended.
        log.error(e.getMessage(), e);
        throw e;
    } catch (Exception e) {
        registry.counter(registry.createId(Metrics.CounterThrift.getMetricName() + "." + methodName)
            .withTags(Metrics.tagStatusFailureMap)).increment();
        final String message = String.format("%s -- %s failed", e.getMessage(), methodName);
        log.error(message, e);
        // Wrap in MetaException, preserving the original cause for the server log.
        final MetaException me = new MetaException(message);
        me.initCause(e);
        throw me;
    } finally {
        // Always record latency, success or failure.
        final long duration = registry.clock().wallTime() - start;
        this.registry.timer(Metrics.TimerThriftRequest.getMetricName()
            + "." + methodName).record(duration, TimeUnit.MILLISECONDS);
        log.info("+++ Thrift({}): Time taken to complete {} is {} ms", catalogName, methodName, duration);
    }
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean revoke_privileges(final PrivilegeBag privileges) throws TException {
    throw unimplemented("revoke_privileges", new Object[]{privileges});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean revoke_role(final String roleName, final String principalName, final PrincipalType principalType)
    throws TException {
    throw unimplemented("revoke_role", new Object[]{roleName, principalName, principalType});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public void setMetaConf(final String key, final String value) throws TException {
    throw unimplemented("setMetaConf", new Object[]{key, value});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean set_aggr_stats_for(final SetPartitionsStatsRequest request) throws TException {
    throw unimplemented("set_aggr_stats_for", new Object[]{request});
}
/**
 * {@inheritDoc}
 *
 * <p>Records the user name on the request context and echoes back the group names with the user
 * name appended.</p>
 */
@Override
public List<String> set_ugi(final String userName, final List<String> groupNames) throws TException {
    MetacatContextManager.getContext().setUserName(userName);
    return requestWrapper("set_ugi", new Object[]{userName, groupNames}, () -> {
        // Fix: work on a copy instead of mutating the caller-supplied list, which could be
        // immutable (UnsupportedOperationException) or null (NullPointerException).
        final List<String> result = groupNames == null
            ? Lists.newArrayList()
            : Lists.newArrayList(groupNames);
        result.add(userName);
        return result;
    });
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public ShowCompactResponse show_compact(final ShowCompactRequest rqst) throws TException {
    throw unimplemented("show_compact", new Object[]{rqst});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public ShowLocksResponse show_locks(final ShowLocksRequest rqst) throws TException {
    throw unimplemented("show_locks", new Object[]{rqst});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public void unlock(final UnlockRequest rqst) throws TException {
    throw unimplemented("unlock", new Object[]{rqst});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean update_partition_column_statistics(final ColumnStatistics statsObj) throws TException {
    throw unimplemented("update_partition_column_statistics", new Object[]{statsObj});
}
/**
 * {@inheritDoc}
 *
 * <p>Not implemented by Metacat; always throws {@code InvalidOperationException}.</p>
 */
@Override
public boolean update_table_column_statistics(final ColumnStatistics statsObj) throws TException {
    throw unimplemented("update_table_column_statistics", new Object[]{statsObj});
}
/**
 * A {@code Supplier} analogue whose {@code get} may throw {@link TException}, so thrift handler
 * bodies can be passed to {@code requestWrapper} as lambdas.
 *
 * @param <T> the result type
 */
@FunctionalInterface
interface ThriftSupplier<T> {
    T get() throws TException;
}
}
| 9,750 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/CatalogThriftEventHandler.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.google.common.base.Objects;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.server.ServerContext;
import org.apache.thrift.server.TServerEventHandler;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import javax.annotation.Nullable;
/**
* Server event handler.
*/
/**
 * Thrift server event handler. Installs a per-request {@link MetacatRequestContext} when a
 * connection is created and removes it when the connection is torn down.
 */
public class CatalogThriftEventHandler implements TServerEventHandler {
    /**
     * {@inheritDoc}
     *
     * <p>Builds a request context — capturing the client host when the transport is a socket and
     * the id of the current thread — and registers it with {@link MetacatContextManager}.</p>
     */
    @Override
    public ServerContext createContext(final TProtocol input, final TProtocol output) {
        final String userName = "metacat-thrift-interface";
        String clientHost = null; //requestContext.getHeaderString("X-Forwarded-For");
        final long requestThreadId = Thread.currentThread().getId();
        final TTransport transport = input.getTransport();
        if (transport instanceof TSocket) {
            final TSocket thriftSocket = (TSocket) transport;
            clientHost = thriftSocket.getSocket().getInetAddress().getHostAddress();
        }
        final CatalogServerRequestContext context = new CatalogServerRequestContext(
            userName,
            null,
            clientHost,
            null,
            "hive",
            requestThreadId
        );
        MetacatContextManager.setContext(context);
        return context;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Verifies the call is on the original request thread, then clears the thread-local
     * context.</p>
     */
    @Override
    public void deleteContext(final ServerContext serverContext, final TProtocol input, final TProtocol output) {
        validateRequest((CatalogServerRequestContext) serverContext);
        MetacatContextManager.removeContext();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void preServe() {
        // nothing to do
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void processContext(final ServerContext serverContext, final TTransport inputTransport,
        final TTransport outputTransport) {
        validateRequest((CatalogServerRequestContext) serverContext);
    }

    // The thread-local context installed in createContext is only visible on the thread that
    // created it, so every subsequent callback for the connection must run on that same thread.
    private void validateRequest(final CatalogServerRequestContext serverContext) {
        final long requestThreadId = serverContext.requestThreadId;
        if (requestThreadId != Thread.currentThread().getId()) {
            throw new IllegalStateException("Expect all processing to happen on the same thread as the request thread");
        }
    }

    /**
     * Request context that additionally records the id of the thread that created it, so the
     * handler can assert thread affinity.
     */
    public static class CatalogServerRequestContext extends MetacatRequestContext implements ServerContext {
        private final long requestThreadId;

        CatalogServerRequestContext(
            @Nullable final String userName,
            @Nullable final String clientAppName,
            @Nullable final String clientId,
            @Nullable final String jobId,
            @Nullable final String dataTypeContext,
            final long requestThreadId
        ) {
            super(userName, clientAppName, clientId, jobId, dataTypeContext, MetacatRequestContext.UNKNOWN, "thrift");
            this.requestThreadId = requestThreadId;
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            if (!super.equals(o)) {
                return false;
            }
            final CatalogServerRequestContext that = (CatalogServerRequestContext) o;
            return requestThreadId == that.requestThreadId;
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public int hashCode() {
            return Objects.hashCode(super.hashCode(), requestThreadId);
        }
    }
}
| 9,751 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/DateConverters.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.netflix.metacat.common.server.properties.Config;
import lombok.NonNull;
import javax.annotation.Nullable;
import java.time.Instant;
import java.util.Date;
/**
* Date converter.
*
* @author amajumdar
* @since 1.0.0
*/
/**
 * Converts between {@link Date} objects and epoch timestamps, honouring the configured epoch
 * unit (seconds vs milliseconds).
 *
 * @author amajumdar
 * @since 1.0.0
 */
public class DateConverters {
    private final Config config;

    /**
     * Constructor.
     *
     * @param config The application config.
     */
    public DateConverters(@NonNull final Config config) {
        this.config = config;
    }

    /**
     * Converts a date to its epoch value.
     *
     * @param d date to convert, may be null
     * @return epoch seconds or milliseconds per configuration, or null when the input is null
     */
    public Long fromDateToLong(@Nullable final Date d) {
        if (d == null) {
            return null;
        }
        final Instant instant = d.toInstant();
        if (config.isEpochInSeconds()) {
            return instant.getEpochSecond();
        }
        return instant.toEpochMilli();
    }

    /**
     * Converts an epoch value back to a date.
     *
     * @param l epoch seconds or milliseconds per configuration, may be null
     * @return date, or null when the input is null
     */
    public Date fromLongToDate(@Nullable final Long l) {
        if (l == null) {
            return null;
        }
        return Date.from(config.isEpochInSeconds() ? Instant.ofEpochSecond(l) : Instant.ofEpochMilli(l));
    }
}
| 9,752 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/CatalogThriftServiceFactory.java
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
/**
* Thrift service factory.
*/
/**
 * Factory for per-catalog thrift services.
 */
public interface CatalogThriftServiceFactory {
    /**
     * Create the thrift service for the given catalog.
     *
     * @param catalogName catalog name
     * @param portNumber  port the service listens on
     * @return service
     */
    CatalogThriftService create(String catalogName, int portNumber);
}
| 9,753 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/CatalogThriftServiceFactoryImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.netflix.metacat.common.server.api.v1.MetacatV1;
import com.netflix.metacat.common.server.api.v1.PartitionV1;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import java.util.Objects;
/**
* Thrift service factory.
*/
/**
 * Thrift service factory that caches one {@link CatalogThriftService} per (catalog, port) pair.
 */
public class CatalogThriftServiceFactoryImpl implements CatalogThriftServiceFactory {
    private final LoadingCache<CacheKey, CatalogThriftService> cache;

    /**
     * Constructor.
     *
     * @param config config
     * @param hiveConverters hive converter
     * @param metacatV1 Metacat V1
     * @param partitionV1 Partition V1
     * @param registry registry for spectator
     */
    public CatalogThriftServiceFactoryImpl(
        final Config config,
        final HiveConverters hiveConverters,
        final MetacatV1 metacatV1,
        final PartitionV1 partitionV1,
        final Registry registry
    ) {
        // Services are built lazily; the loader runs at most once per distinct key.
        this.cache = CacheBuilder.newBuilder()
            .build(new CacheLoader<CacheKey, CatalogThriftService>() {
                @Override
                public CatalogThriftService load(final CacheKey key) {
                    return new CatalogThriftService(config, hiveConverters, metacatV1, partitionV1,
                        key.catalogName, key.portNumber, registry);
                }
            });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CatalogThriftService create(final String catalogName, final int portNumber) {
        return cache.getUnchecked(new CacheKey(catalogName, portNumber));
    }

    /**
     * Cache key composed of catalog name and port.
     */
    private static final class CacheKey {
        private final String catalogName;
        private final int portNumber;

        private CacheKey(final String catalogName, final int portNumber) {
            this.catalogName = catalogName;
            this.portNumber = portNumber;
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof CacheKey)) {
                return false;
            }
            final CacheKey cacheKey = (CacheKey) o;
            // Compare the primitive directly; Objects.equals(int, int) would autobox both values.
            return portNumber == cacheKey.portNumber && Objects.equals(catalogName, cacheKey.catalogName);
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public int hashCode() {
            return Objects.hash(catalogName, portNumber);
        }
    }
}
| 9,754 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/HiveConverters.java
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.TableDto;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.List;
/**
* Hive converter interface.
*/
/**
 * Conversions between hive metastore objects and Metacat DTOs.
 */
public interface HiveConverters {
    /**
     * Converts from hive table to metacat table info.
     *
     * @param name  qualified name of the table
     * @param table hive table
     * @return table info
     */
    TableDto hiveToMetacatTable(QualifiedName name, Table table);

    /**
     * Converts from metacat database info to hive database info.
     *
     * @param databaseDto database
     * @return database
     */
    Database metacatToHiveDatabase(DatabaseDto databaseDto);

    /**
     * Converts from metacat table info to hive table info.
     *
     * @param dto table
     * @return table
     */
    Table metacatToHiveTable(TableDto dto);

    /**
     * Converts from hive partition info to metacat partition info.
     *
     * @param tableDto  table the partition belongs to
     * @param partition hive partition
     * @return partition info
     */
    PartitionDto hiveToMetacatPartition(TableDto tableDto, Partition partition);

    /**
     * Gets the partition values from the partition name.
     *
     * @param tableDto table the partition belongs to
     * @param partName partition name
     * @return partition values
     */
    List<String> getPartValsFromName(TableDto tableDto, String partName);

    /**
     * Gets the partition name from partition values.
     *
     * @param tableDto table the partition belongs to
     * @param partVals partition values
     * @return partition name
     */
    String getNameFromPartVals(TableDto tableDto, List<String> partVals);

    /**
     * Converts from metacat partition info to hive partition info.
     *
     * @param partitionDto metacat partition
     * @param tableDto     table the partition belongs to
     * @return hive partition
     */
    Partition metacatToHivePartition(PartitionDto partitionDto, TableDto tableDto);
}
| 9,755 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/HiveConvertersImpl.java
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.thrift;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.AuditDto;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.FieldDto;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.dto.ViewDto;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import javax.annotation.Nullable;
import java.time.Instant;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Hive converter.
*/
public class HiveConvertersImpl implements HiveConverters {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
/**
 * Converts a date to epoch seconds as an {@code Integer}.
 *
 * @param date date to convert, may be null
 * @return epoch seconds, or null when the input is null
 * @throws IllegalStateException when the value does not fit in an int
 */
@VisibleForTesting
Integer dateToEpochSeconds(@Nullable final Date date) {
    if (date == null) {
        return null;
    }
    final long seconds = date.toInstant().getEpochSecond();
    // Fix: guard both bounds — pre-1901 dates underflow int just as post-2038 dates overflow it;
    // the previous check only rejected values above Integer.MAX_VALUE and silently truncated the rest.
    if (seconds >= Integer.MIN_VALUE && seconds <= Integer.MAX_VALUE) {
        return (int) seconds;
    }
    throw new IllegalStateException("Unable to convert date " + date + " to an integer seconds value");
}
// Inverse of dateToEpochSeconds: builds a Date from an epoch-seconds value.
private Date epochSecondsToDate(final long seconds) {
    return Date.from(Instant.ofEpochSecond(seconds));
}
// Converts a single hive column definition to a metacat field DTO.
private FieldDto hiveToMetacatField(final FieldSchema field, final boolean isPartitionKey) {
    final FieldDto fieldDto = new FieldDto();
    fieldDto.setName(field.getName());
    fieldDto.setComment(field.getComment());
    // Hive carries no separate source type, so both type fields get the hive type string.
    final String hiveType = field.getType();
    fieldDto.setType(hiveType);
    fieldDto.setSource_type(hiveType);
    fieldDto.setPartition_key(isPartitionKey);
    return fieldDto;
}
// Converts a metacat field DTO back to a hive column definition (name, type, comment only).
private FieldSchema metacatToHiveField(final FieldDto fieldDto) {
    final FieldSchema fieldSchema = new FieldSchema();
    fieldSchema.setName(fieldDto.getName());
    fieldSchema.setType(fieldDto.getType());
    fieldSchema.setComment(fieldDto.getComment());
    return fieldSchema;
}
/**
 * {@inheritDoc}
 *
 * <p>Field order matters in the result: non-partition columns first, then partition keys.</p>
 */
@Override
public TableDto hiveToMetacatTable(final QualifiedName name, final Table table) {
    final TableDto dto = new TableDto();
    dto.setSerde(toStorageDto(table.getSd(), table.getOwner()));
    dto.setAudit(new AuditDto());
    dto.setName(name);
    // Hive stores creation time in epoch seconds; only set it when actually present.
    if (table.isSetCreateTime()) {
        dto.getAudit().setCreatedDate(epochSecondsToDate(table.getCreateTime()));
    }
    dto.setMetadata(table.getParameters());
    final List<FieldSchema> nonPartitionColumns = table.getSd().getCols();
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final List<FieldDto> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream()
        .map(field -> this.hiveToMetacatField(field, false))
        .forEachOrdered(allFields::add);
    partitionColumns.stream()
        .map(field -> this.hiveToMetacatField(field, true))
        .forEachOrdered(allFields::add);
    dto.setFields(allFields);
    dto.setView(new ViewDto(table.getViewOriginalText(),
        table.getViewExpandedText()));
    // Surface the hive table owner as definition metadata: { "owner": { "userId": <owner> } }.
    if (StringUtils.isNotBlank(table.getOwner())) {
        final ObjectNode definitionMetadata = OBJECT_MAPPER.createObjectNode();
        final ObjectNode ownerNode = definitionMetadata.with("owner");
        ownerNode.put("userId", table.getOwner());
        dto.setDefinitionMetadata(definitionMetadata);
    }
    return dto;
}
/**
* {@inheritDoc}
*/
@Override
@SuppressWarnings("unchecked")
public Database metacatToHiveDatabase(final DatabaseDto dto) {
final Database database = new Database();
String name = "";
String description = "";
final QualifiedName databaseName = dto.getName();
if (databaseName != null) {
name = databaseName.getDatabaseName();
// Since this is required setting it to the same as the DB name for now
description = databaseName.getDatabaseName();
}
database.setName(name);
database.setDescription(description);
String dbUri = dto.getUri();
if (Strings.isNullOrEmpty(dbUri)) {
dbUri = "";
}
database.setLocationUri(dbUri);
Map<String, String> metadata = dto.getMetadata();
if (metadata == null) {
metadata = Collections.EMPTY_MAP;
}
database.setParameters(metadata);
return database;
}
/**
* {@inheritDoc}
*/
@Override
public Table metacatToHiveTable(final TableDto dto) {
final Table table = new Table();
final QualifiedName name = dto.getName();
if (name != null) {
table.setTableName(name.getTableName());
table.setDbName(name.getDatabaseName());
}
final StorageDto storageDto = dto.getSerde();
if (storageDto != null) {
table.setOwner(storageDto.getOwner());
}
final AuditDto auditDto = dto.getAudit();
if (auditDto != null && auditDto.getCreatedDate() != null) {
table.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
}
Map<String, String> params = new HashMap<>();
if (dto.getMetadata() != null) {
params = dto.getMetadata();
}
table.setParameters(params);
updateTableTypeAndViewInfo(dto, table);
table.setSd(fromStorageDto(storageDto, table.getTableName()));
final List<FieldDto> fields = dto.getFields();
if (fields == null) {
table.setPartitionKeys(Collections.emptyList());
table.getSd().setCols(Collections.emptyList());
} else {
final List<FieldSchema> nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
final List<FieldSchema> partitionFields = Lists.newArrayListWithCapacity(fields.size());
for (FieldDto fieldDto : fields) {
final FieldSchema f = metacatToHiveField(fieldDto);
if (fieldDto.isPartition_key()) {
partitionFields.add(f);
} else {
nonPartitionFields.add(f);
}
}
table.setPartitionKeys(partitionFields);
table.getSd().setCols(nonPartitionFields);
}
dto.getTableOwner().ifPresent(table::setOwner);
return table;
}
private void updateTableTypeAndViewInfo(final TableDto dto, final Table table) {
final ViewDto viewDto = dto.getView();
if (null == dto.getView() || Strings.isNullOrEmpty(viewDto.getViewOriginalText())) {
table.setTableType(TableType.EXTERNAL_TABLE.name());
return;
}
table.setTableType(TableType.VIRTUAL_VIEW.name());
table.setViewOriginalText(viewDto.getViewOriginalText());
table.setViewExpandedText(viewDto.getViewExpandedText());
}
private StorageDto toStorageDto(@Nullable final StorageDescriptor sd, final String owner) {
final StorageDto result = new StorageDto();
if (sd != null) {
result.setOwner(owner);
result.setUri(sd.getLocation());
result.setInputFormat(sd.getInputFormat());
result.setOutputFormat(sd.getOutputFormat());
result.setParameters(sd.getParameters());
final SerDeInfo serde = sd.getSerdeInfo();
if (serde != null) {
result.setSerializationLib(serde.getSerializationLib());
result.setSerdeInfoParameters(serde.getParameters());
}
}
return result;
}
private StorageDescriptor fromStorageDto(@Nullable final StorageDto storageDto, @Nullable final String serdeName) {
//
// Set all required fields to null. This is to simulate Hive behavior.
// Setting it to empty string failed certain hive operations.
//
final StorageDescriptor result = new StorageDescriptor();
String inputFormat = null;
String location = null;
String outputFormat = null;
String serializationLib = null;
Map<String, String> sdParams = Maps.newHashMap();
Map<String, String> serdeParams = Maps.newHashMap();
if (storageDto != null) {
if (storageDto.getInputFormat() != null) {
inputFormat = storageDto.getInputFormat();
}
if (storageDto.getUri() != null) {
location = storageDto.getUri();
}
if (storageDto.getOutputFormat() != null) {
outputFormat = storageDto.getOutputFormat();
}
if (storageDto.getSerializationLib() != null) {
serializationLib = storageDto.getSerializationLib();
}
if (storageDto.getParameters() != null) {
sdParams = storageDto.getParameters();
}
if (storageDto.getSerdeInfoParameters() != null) {
serdeParams = storageDto.getSerdeInfoParameters();
}
}
result.setSerdeInfo(new SerDeInfo(serdeName, serializationLib, serdeParams));
result.setBucketCols(Collections.emptyList());
result.setSortCols(Collections.emptyList());
result.setInputFormat(inputFormat);
result.setLocation(location);
result.setOutputFormat(outputFormat);
result.setCols(Collections.emptyList());
// Setting an empty skewed info.
result.setSkewedInfo(new SkewedInfo(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap()));
result.setParameters(sdParams);
return result;
}
/**
* {@inheritDoc}
*/
@Override
public PartitionDto hiveToMetacatPartition(final TableDto tableDto, final Partition partition) {
final QualifiedName tableName = tableDto.getName();
final QualifiedName partitionName = QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(),
tableName.getTableName(), getNameFromPartVals(tableDto, partition.getValues()));
final PartitionDto result = new PartitionDto();
String owner = "";
if (tableDto.getSerde() != null) {
owner = tableDto.getSerde().getOwner();
}
//not setting Serde to view
if (null == tableDto.getView() || Strings.isNullOrEmpty(tableDto.getView().getViewOriginalText())) {
result.setSerde(toStorageDto(partition.getSd(), owner));
}
result.setMetadata(partition.getParameters());
final AuditDto auditDto = new AuditDto();
auditDto.setCreatedDate(epochSecondsToDate(partition.getCreateTime()));
auditDto.setLastModifiedDate(epochSecondsToDate(partition.getLastAccessTime()));
result.setAudit(auditDto);
result.setName(partitionName);
return result;
}
/**
* {@inheritDoc}
*/
@Override
public List<String> getPartValsFromName(@Nullable final TableDto tableDto, final String partName) {
// Unescape the partition name
final LinkedHashMap<String, String> hm;
try {
hm = Warehouse.makeSpecFromName(partName);
} catch (MetaException e) {
throw new IllegalArgumentException("Invalid partition name", e);
}
// Get the partition keys.
List<String> partitionKeys = null;
if (tableDto != null) {
partitionKeys = tableDto.getPartition_keys();
}
// If table has not been provided, return the values without validating.
if (partitionKeys != null) {
final List<String> partVals = Lists.newArrayListWithCapacity(partitionKeys.size());
for (String key : partitionKeys) {
final String val = hm.get(key);
if (val == null) {
throw new IllegalArgumentException("Invalid partition name - missing " + key);
}
partVals.add(val);
}
return partVals;
} else {
return Lists.newArrayList(hm.values());
}
}
/**
* {@inheritDoc}
*/
@Override
public String getNameFromPartVals(final TableDto tableDto, final List<String> partVals) {
final List<String> partitionKeys = tableDto.getPartition_keys();
if (partitionKeys.size() != partVals.size()) {
throw new IllegalArgumentException("Not the same number of partition columns and partition values");
}
return FileUtils.makePartName(partitionKeys, partVals, "");
}
/**
* {@inheritDoc}
*/
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
final Partition result = new Partition();
final QualifiedName name = partitionDto.getName();
List<String> values = Lists.newArrayListWithCapacity(16);
String databaseName = null;
String tableName = null;
if (name != null) {
if (name.getPartitionName() != null) {
//
// Unescape the partition name to get the right partition values.
// Partition name always are escaped where as the parition values are not.
//
values = getPartValsFromName(tableDto, name.getPartitionName());
}
if (name.getDatabaseName() != null) {
databaseName = name.getDatabaseName();
}
if (name.getTableName() != null) {
tableName = name.getTableName();
}
}
result.setValues(values);
result.setDbName(databaseName);
result.setTableName(tableName);
Map<String, String> metadata = partitionDto.getMetadata();
if (metadata == null) {
metadata = Maps.newHashMap();
}
result.setParameters(metadata);
result.setSd(fromStorageDto(partitionDto.getSerde(), tableName));
final StorageDescriptor sd = result.getSd();
if (tableDto != null) {
if (sd.getSerdeInfo() != null && tableDto.getSerde() != null && Strings.isNullOrEmpty(
sd.getSerdeInfo().getSerializationLib())) {
sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
}
final List<FieldDto> fields = tableDto.getFields();
if (fields == null) {
sd.setCols(Collections.emptyList());
} else {
sd.setCols(fields.stream()
.filter(field -> !field.isPartition_key())
.map(this::metacatToHiveField)
.collect(Collectors.toList()));
}
}
final AuditDto auditDto = partitionDto.getAudit();
if (auditDto != null) {
if (auditDto.getCreatedDate() != null) {
result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
}
if (auditDto.getLastModifiedDate() != null) {
result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
}
}
return result;
}
}
| 9,756 |
0 |
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat
|
Create_ds/metacat/metacat-thrift/src/main/java/com/netflix/metacat/thrift/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This package includes thrift implementation classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.thrift;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,757 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/PostgreSqlConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.postgresql;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorDatabaseService;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.inject.Inject;
import javax.sql.DataSource;
/**
 * PostgreSQL-specific database service. All read/listing behavior is inherited from
 * {@link JdbcConnectorDatabaseService}; the mutating operations are explicitly rejected because
 * this connector does not support them.
 *
 * @author tgianos
 * @see JdbcConnectorDatabaseService
 * @since 1.0.0
 */
public class PostgreSqlConnectorDatabaseService extends JdbcConnectorDatabaseService {

    /**
     * Constructor.
     *
     * @param dataSource      the PostgreSQL datasource queries are delegated to
     * @param exceptionMapper translates SQLExceptions into ConnectorExceptions
     */
    @Inject
    public PostgreSqlConnectorDatabaseService(
        @Nonnull @NonNull final DataSource dataSource,
        @Nonnull @NonNull final JdbcExceptionMapper exceptionMapper
    ) {
        super(dataSource, exceptionMapper);
    }

    /**
     * {@inheritDoc}
     *
     * <p>Database creation is not supported by this connector.</p>
     */
    @Override
    public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final DatabaseInfo resource) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * {@inheritDoc}
     *
     * <p>Database deletion is not supported by this connector.</p>
     */
    @Override
    public void delete(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
}
| 9,758 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/PostgreSqlTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.postgresql;
import com.netflix.metacat.common.type.ArrayType;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.VarbinaryType;
import com.netflix.metacat.common.type.VarcharType;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nonnull;
/**
 * Type converter between PostgreSQL column types and Metacat canonical types.
 *
 * @author tgianos
 * @since 1.0.0
 */
@Slf4j
public class PostgreSqlTypeConverter extends JdbcTypeConverter {

    private static final String ARRAY = "array";
    private static final String SINGLE_ARRAY = "[]";
    private static final String MULTI_ARRAY = "[][]";

    /**
     * {@inheritDoc}
     *
     * @see <a href="https://www.postgresql.org/docs/current/static/datatype.html">PosgreSQL Data Types</a>
     */
    @Override
    public Type toMetacatType(@Nonnull @NonNull final String type) {
        // See: https://www.postgresql.org/docs/current/static/datatype.html
        final String lowerType = type.toLowerCase();
        // Split up the possible type: TYPE[(size, magnitude)] EXTRA
        final String[] splitType = this.splitType(lowerType);
        final Type elementType;
        switch (splitType[0]) {
            case "smallint":
            case "int2":
                elementType = BaseType.SMALLINT;
                break;
            case "int":
            case "integer":
            case "int4":
                elementType = BaseType.INT;
                break;
            case "int8":
            case "bigint":
                elementType = BaseType.BIGINT;
                break;
            case "decimal":
            case "numeric":
                elementType = this.toMetacatDecimalType(splitType);
                break;
            case "real":
            case "float4":
                elementType = BaseType.FLOAT;
                break;
            case "double precision":
            case "float8":
                elementType = BaseType.DOUBLE;
                break;
            case "character varying":
            case "varchar":
                elementType = this.toMetacatVarcharType(splitType);
                break;
            case "character":
            case "char":
                elementType = this.toMetacatCharType(splitType);
                break;
            case "text":
                elementType = BaseType.STRING;
                break;
            case "bytea":
                elementType = VarbinaryType.createVarbinaryType(Integer.MAX_VALUE);
                break;
            case "timestamp":
                elementType = this.toMetacatTimestampType(splitType);
                break;
            // "timestamptz" is PostgreSQL's actual alias for TIMESTAMP WITH TIME ZONE; the
            // historical "timestampz" spelling is retained for backwards compatibility.
            case "timestampz":
            case "timestamptz":
                elementType = BaseType.TIMESTAMP_WITH_TIME_ZONE;
                break;
            case "date":
                elementType = BaseType.DATE;
                break;
            case "time":
                elementType = this.toMetacatTimeType(splitType);
                break;
            // "timetz" is PostgreSQL's actual alias for TIME WITH TIME ZONE; the historical
            // "timez" spelling is retained for backwards compatibility.
            case "timez":
            case "timetz":
                elementType = BaseType.TIME_WITH_TIME_ZONE;
                break;
            case "boolean":
            case "bool":
                elementType = BaseType.BOOLEAN;
                break;
            case "bit":
            case "bit varying":
            case "varbit":
                elementType = this.toMetacatBitType(splitType);
                break;
            case "json":
                elementType = BaseType.JSON;
                break;
            case "smallserial":
            case "serial2":
            case "serial":
            case "serial4":
            case "bigserial":
            case "serial8":
            case "money":
            case "interval":
            case "enum":
            case "point":
            case "line":
            case "lseg":
            case "box":
            case "path":
            case "polygon":
            case "circle":
            case "cidr":
            case "inet":
            case "macaddr":
            case "tsvector":
            case "tsquery":
            case "uuid":
            case "xml":
            case "int4range":
            case "int8range":
            case "numrange":
            case "tsrange":
            case "tstzrange":
            case "daterange":
            case "oid":
            case "regproc":
            case "regprocedure":
            case "regoper":
            case "regoperator":
            case "regclass":
            case "regtype":
            case "regrole":
            case "regnamespace":
            case "regconfig":
            case "regdictionary":
            case "pg_lsn":
            case "jsonb":
            case "txid_snapshot":
            default:
                // TODO: Will catch complex types but not sure how to parse beyond that right now, may be recursive?
                // https://www.postgresql.org/docs/current/static/rowtypes.html
                log.info("Encountered {} type. Returning unknown type", splitType[0]);
                return BaseType.UNKNOWN;
        }
        return this.checkForArray(splitType, elementType);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String fromMetacatType(@Nonnull @NonNull final Type type) {
        switch (type.getTypeSignature().getBase()) {
            case ARRAY:
                if (!(type instanceof ArrayType)) {
                    throw new IllegalArgumentException("Expected an ARRAY type but was " + type.getClass().getName());
                }
                final ArrayType arrayType = (ArrayType) type;
                final String array;
                final Type elementType;
                // Check for nested arrays (only one level of nesting is supported).
                if (arrayType.getElementType() instanceof ArrayType) {
                    array = MULTI_ARRAY;
                    elementType = ((ArrayType) arrayType.getElementType()).getElementType();
                } else {
                    array = SINGLE_ARRAY;
                    elementType = arrayType.getElementType();
                }
                // Recursively determine the type of the array
                return this.fromMetacatType(elementType) + array;
            case BIGINT:
                return "BIGINT";
            case BOOLEAN:
                return "BOOLEAN";
            case CHAR:
                if (!(type instanceof CharType)) {
                    throw new IllegalArgumentException("Expected CHAR type but was " + type.getClass().getName());
                }
                final CharType charType = (CharType) type;
                return "CHAR(" + charType.getLength() + ")";
            case DATE:
                return "DATE";
            case DECIMAL:
                if (!(type instanceof DecimalType)) {
                    throw new IllegalArgumentException("Expected decimal type but was " + type.getClass().getName());
                }
                final DecimalType decimalType = (DecimalType) type;
                return "NUMERIC(" + decimalType.getPrecision() + ", " + decimalType.getScale() + ")";
            case DOUBLE:
                return "DOUBLE PRECISION";
            case FLOAT:
                return "REAL";
            case INT:
                return "INT";
            case INTERVAL_DAY_TO_SECOND:
                // TODO: It does but not sure how best to represent now
                throw new UnsupportedOperationException("PostgreSQL doesn't support interval types");
            case INTERVAL_YEAR_TO_MONTH:
                // TODO: It does but not sure how best to represent now
                throw new UnsupportedOperationException("PostgreSQL doesn't support interval types");
            case JSON:
                return "JSON";
            case MAP:
                throw new UnsupportedOperationException("PostgreSQL doesn't support map types");
            case ROW:
                // TODO: Well it does but how do we know what the internal type is?
                throw new UnsupportedOperationException("PostgreSQL doesn't support row types");
            case SMALLINT:
                return "SMALLINT";
            case STRING:
                return "TEXT";
            case TIME:
                return "TIME";
            case TIME_WITH_TIME_ZONE:
                return "TIME WITH TIME ZONE";
            case TIMESTAMP:
                return "TIMESTAMP";
            case TIMESTAMP_WITH_TIME_ZONE:
                return "TIMESTAMP WITH TIME ZONE";
            case TINYINT:
                // NOTE: There is no tiny int type in PostgreSQL so using slightly larger SMALLINT
                return "SMALLINT";
            case UNKNOWN:
                throw new IllegalArgumentException("Can't map an unknown type");
            case VARBINARY:
                return "BYTEA";
            case VARCHAR:
                if (!(type instanceof VarcharType)) {
                    throw new IllegalArgumentException("Expected varchar type but was " + type.getClass().getName());
                }
                final VarcharType varcharType = (VarcharType) type;
                // NOTE: PostgreSQL lets you store up to 1GB in a varchar field which is about the same as TEXT
                return "CHARACTER VARYING(" + varcharType.getLength() + ")";
            default:
                throw new IllegalArgumentException("Unknown type " + type.getTypeSignature().getBase());
        }
    }

    // Wraps the element type in ArrayType(s) when the split type carries an array suffix.
    // NOTE(review): splitType[3] / splitType[4] are assumed to hold the array suffix and extra
    // tokens produced by JdbcTypeConverter#splitType — confirm against the base class.
    private Type checkForArray(@Nonnull @NonNull final String[] splitType, @Nonnull @NonNull final Type elementType) {
        if (SINGLE_ARRAY.equals(splitType[3]) || ARRAY.equals(splitType[4])) {
            return new ArrayType(elementType);
        } else if (MULTI_ARRAY.equals(splitType[3])) {
            return new ArrayType(new ArrayType(elementType));
        } else {
            return elementType;
        }
    }
}
| 9,759 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/PostgreSqlExceptionMapper.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.postgresql;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.sql.SQLException;
/**
 * Exception mapper for PostgreSQL SQLExceptions.
 *
 * @author tgianos
 * @author zhenl
 * @see SQLException
 * @see ConnectorException
 * @see <a href="https://www.postgresql.org/docs/current/static/errcodes-appendix.html">PostgreSQL Ref</a>
 * @since 1.0.0
 */
public class PostgreSqlExceptionMapper implements JdbcExceptionMapper {

    /**
     * {@inheritDoc}
     *
     * <p>Maps PostgreSQL SQLSTATE codes to the corresponding ConnectorException subtype; anything
     * unrecognized (including a missing SQLSTATE) maps to a generic ConnectorException.</p>
     */
    @Override
    public ConnectorException toConnectorException(
        @Nonnull @NonNull final SQLException se,
        @Nonnull @NonNull final QualifiedName name
    ) {
        final String sqlState = se.getSQLState();
        if (sqlState == null) {
            // No SQLSTATE to interpret: return (rather than throw) the generic wrapper so this
            // mapper behaves consistently for all inputs and the caller decides when to throw.
            return new ConnectorException(se.getMessage(), se);
        }

        switch (sqlState) {
            case "42P04": //database already exists
                return new DatabaseAlreadyExistsException(name, se);
            case "42P07": //table already exists
                return new TableAlreadyExistsException(name, se);
            case "3D000":
            case "3F000": //database does not exist
                return new DatabaseNotFoundException(name, se);
            case "42P01": //table doesn't exist
                return new TableNotFoundException(name, se);
            default:
                return new ConnectorException(se.getMessage(), se);
        }
    }
}
| 9,760 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/PostgreSqlConnectorModule.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.postgresql;
import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.jdbc.JdbcExceptionMapper;
import com.netflix.metacat.connector.jdbc.JdbcTypeConverter;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorPartitionService;
import com.netflix.metacat.connector.jdbc.services.JdbcConnectorTableService;
import lombok.NonNull;
import javax.annotation.Nonnull;
import javax.sql.DataSource;
import java.util.Map;
/**
 * A Guice Module for the PostgreSqlConnector.
 *
 * <p>Binds the datasource for the catalog shard plus the type converter, exception mapper and the
 * database/table/partition services (each overridable via connector configuration).</p>
 *
 * @author tgianos
 * @since 1.0.0
 */
public class PostgreSqlConnectorModule extends AbstractModule {

    // Shard name used to look up the datasource for this catalog.
    private final String catalogShardName;
    // Raw connector configuration from the catalog properties.
    private final Map<String, String> configuration;

    /**
     * Constructor.
     *
     * @param catalogShardName catalog shard name
     * @param configuration    connector configuration
     */
    PostgreSqlConnectorModule(
        @Nonnull @NonNull final String catalogShardName,
        @Nonnull @NonNull final Map<String, String> configuration
    ) {
        this.catalogShardName = catalogShardName;
        this.configuration = configuration;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    protected void configure() {
        this.bind(DataSource.class).toInstance(DataSourceManager.get()
            .load(this.catalogShardName, this.configuration).get(this.catalogShardName));
        this.bind(JdbcTypeConverter.class).to(PostgreSqlTypeConverter.class).in(Scopes.SINGLETON);
        this.bind(JdbcExceptionMapper.class).to(PostgreSqlExceptionMapper.class).in(Scopes.SINGLETON);
        // Service implementations may be overridden through configuration; these are the defaults.
        this.bind(ConnectorDatabaseService.class)
            .to(ConnectorUtils.getDatabaseServiceClass(this.configuration, PostgreSqlConnectorDatabaseService.class))
            .in(Scopes.SINGLETON);
        this.bind(ConnectorTableService.class)
            .to(ConnectorUtils.getTableServiceClass(this.configuration, JdbcConnectorTableService.class))
            .in(Scopes.SINGLETON);
        this.bind(ConnectorPartitionService.class)
            .to(ConnectorUtils.getPartitionServiceClass(this.configuration, JdbcConnectorPartitionService.class))
            .in(Scopes.SINGLETON);
    }
}
| 9,761 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/PostgreSqlConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.postgresql;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.DefaultConnectorFactory;
import lombok.NonNull;
import javax.annotation.Nonnull;
import java.util.Map;
/**
 * PostgreSQL implementation of a connector factory.
 *
 * <p>All behavior is inherited from {@link DefaultConnectorFactory}; this subclass only supplies
 * the {@link PostgreSqlConnectorModule} that wires PostgreSQL-specific services.</p>
 *
 * @author tgianos
 * @since 1.0.0
 */
class PostgreSqlConnectorFactory extends DefaultConnectorFactory {

    /**
     * Constructor.
     *
     * @param name             catalog name
     * @param catalogShardName catalog shard name
     * @param configuration    catalog configuration
     */
    PostgreSqlConnectorFactory(
        @Nonnull @NonNull final String name,
        @Nonnull @NonNull final String catalogShardName,
        @Nonnull @NonNull final Map<String, String> configuration
    ) {
        super(name, catalogShardName,
            Lists.newArrayList(new PostgreSqlConnectorModule(catalogShardName, configuration)));
    }
}
| 9,762 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/PostgreSqlConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.postgresql;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import lombok.NonNull;
import javax.annotation.Nonnull;
/**
 * ConnectorPlugin implementation that exposes the PostgreSQL connector to Metacat.
 *
 * @author tgianos
 * @since 1.0.0
 */
public class PostgreSqlConnectorPlugin implements ConnectorPlugin {

    /** Identifier under which this connector type is registered. */
    private static final String CONNECTOR_TYPE = "postgresql";
    /** The type converter is stateless, so a single shared instance suffices. */
    private static final PostgreSqlTypeConverter TYPE_CONVERTER = new PostgreSqlTypeConverter();

    /**
     * {@inheritDoc}
     */
    @Override
    public String getType() {
        return CONNECTOR_TYPE;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorFactory create(@Nonnull @NonNull final ConnectorContext connectorContext) {
        final String catalogName = connectorContext.getCatalogName();
        final String shardName = connectorContext.getCatalogShardName();
        return new PostgreSqlConnectorFactory(catalogName, shardName, connectorContext.getConfiguration());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTypeConverter getTypeConverter() {
        return TYPE_CONVERTER;
    }
}
| 9,763 |
0 |
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-postgresql/src/main/java/com/netflix/metacat/connector/postgresql/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes related to creating a connector for PostgreSQL for Metacat.
*
* @author tgianos
* @since 1.0.0
*/
package com.netflix.metacat.connector.postgresql;
| 9,764 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/package-info.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Common server classes.
*
* @author amajumdar
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.server;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,765 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/SpringConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.core.env.MapPropertySource;
import org.springframework.core.env.StandardEnvironment;
/**
 * Spring based Connector Factory. Hosts the connector's beans inside a dedicated
 * {@link AnnotationConfigApplicationContext} whose lifecycle is tied to this factory:
 * subclasses register configuration classes and property sources, then call
 * {@link #refresh()} to start the context; {@link #stop()} closes it.
 *
 * @author zhenl
 * @since 1.1.0
 */
public abstract class SpringConnectorFactory implements ConnectorFactory {
    /** Spring application context holding the connector's beans; available to subclasses. */
    protected final AnnotationConfigApplicationContext ctx;
    /** Name of the catalog this factory serves. */
    private final String catalogName;
    /** Shard name of the catalog this factory serves. */
    private final String catalogShardName;

    /**
     * Constructor. Creates the (not yet refreshed) application context and registers the
     * given converter and connector context as singleton beans so connector beans can
     * inject them by name.
     *
     * @param connectorInfoConverter connector info converter
     * @param connectorContext       connector related config
     */
    public SpringConnectorFactory(final ConnectorInfoConverter connectorInfoConverter,
                                  final ConnectorContext connectorContext) {
        this.catalogName = connectorContext.getCatalogName();
        this.catalogShardName = connectorContext.getCatalogShardName();
        this.ctx = new AnnotationConfigApplicationContext();
        this.ctx.setEnvironment(new StandardEnvironment());
        this.ctx.getBeanFactory().registerSingleton("ConnectorContext", connectorContext);
        this.ctx.getBeanFactory().registerSingleton("ConnectorInfoConverter", connectorInfoConverter);
    }

    /**
     * Register configuration classes with the context. Must be called before {@link #refresh()}.
     * Known issue: can not register two beans that are the same class but have different qualifiers.
     *
     * @param clazz classes object.
     */
    protected void registerClazz(final Class<?>... clazz) {
        this.ctx.register(clazz);
    }

    /**
     * Add a property source to the context's environment, with highest precedence.
     *
     * @param properties Property source for the environment.
     */
    protected void addEnvProperties(final MapPropertySource properties) {
        this.ctx.getEnvironment().getPropertySources().addFirst(properties);
    }

    /**
     * Refresh (start) the context. Call after all classes and properties are registered.
     */
    public void refresh() {
        this.ctx.refresh();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void stop() {
        this.ctx.close();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getCatalogName() {
        return this.catalogName;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getCatalogShardName() {
        return catalogShardName;
    }
}
| 9,766 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorPlugin.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
/**
 * Plugin interface implemented by Connectors. Each plugin identifies itself by a unique
 * type string and supplies a {@link ConnectorFactory} plus the converters used to
 * translate types and resource metadata between the connector and the common model.
 *
 * @author amajumdar
 * @since 1.0.0
 */
public interface ConnectorPlugin {
    /**
     * Returns the type of the plugin.
     *
     * @return Returns the type of the plugin.
     */
    String getType();

    /**
     * Returns the service implementation for the type.
     *
     * @param connectorContext connector context (catalog name, shard and configuration)
     * @return connector factory
     */
    ConnectorFactory create(ConnectorContext connectorContext);

    /**
     * Returns the type converter of the connector.
     *
     * @return Returns the type converter of the connector.
     */
    ConnectorTypeConverter getTypeConverter();

    /**
     * Returns the dto converter implementation of the connector.
     *
     * @return Returns the dto converter implementation of the connector; by default an
     *         anonymous {@link ConnectorInfoConverter} inheriting all default behavior.
     */
    default ConnectorInfoConverter getInfoConverter() {
        return new ConnectorInfoConverter() {
        };
    }
}
| 9,767 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorFactoryDecorator.java
|
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiter;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
/**
 * A decorator for a connector factory that adds cross-cutting functionality
 * (currently request rate limiting) to all connector services produced by the
 * underlying plugin's factory.
 */
@Slf4j
public class ConnectorFactoryDecorator implements ConnectorFactory {
    private final ConnectorPlugin connectorPlugin;
    /** The wrapped factory created by the plugin; exposed for callers needing the raw delegate. */
    @Getter
    private final ConnectorFactory delegate;
    private final ConnectorContext connectorContext;
    private final RateLimiter rateLimiter;
    // Cached at startup since this is connector-level config that does not change later.
    // The actual decision to enable and enforce throttling is in the rate limiter
    // implementation, which is more dynamic and accommodates changes from the Metacat
    // dynamic configuration.
    private final boolean rateLimiterEnabled;

    /**
     * Creates the decorated connector factory that wraps connector services
     * with additional wrappers.
     *
     * @param connectorPlugin  the underlying plugin
     * @param connectorContext the connector context for the underlying plugin
     */
    public ConnectorFactoryDecorator(@NonNull final ConnectorPlugin connectorPlugin,
                                     @NonNull final ConnectorContext connectorContext) {
        this.connectorPlugin = connectorPlugin;
        this.delegate = connectorPlugin.create(connectorContext);
        this.connectorContext = connectorContext;
        this.rateLimiter = connectorContext.getApplicationContext().getBean(RateLimiter.class);
        this.rateLimiterEnabled = isRateLimiterEnabled();
    }

    /**
     * {@inheritDoc}
     *
     * <p>Wraps the delegate's catalog service with rate limiting when enabled.
     */
    @Override
    public ConnectorCatalogService getCatalogService() {
        ConnectorCatalogService service = delegate.getCatalogService();
        if (rateLimiterEnabled) {
            logThrottledServiceCreation("catalog");
            service = new ThrottlingConnectorCatalogService(service, rateLimiter);
        }
        return service;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Wraps the delegate's database service with rate limiting when enabled.
     */
    @Override
    public ConnectorDatabaseService getDatabaseService() {
        ConnectorDatabaseService service = delegate.getDatabaseService();
        if (rateLimiterEnabled) {
            logThrottledServiceCreation("database");
            service = new ThrottlingConnectorDatabaseService(service, rateLimiter);
        }
        return service;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Wraps the delegate's table service with rate limiting when enabled.
     */
    @Override
    public ConnectorTableService getTableService() {
        ConnectorTableService service = delegate.getTableService();
        if (rateLimiterEnabled) {
            logThrottledServiceCreation("table");
            service = new ThrottlingConnectorTableService(service, rateLimiter);
        }
        return service;
    }

    /**
     * {@inheritDoc}
     *
     * <p>Wraps the delegate's partition service with rate limiting when enabled.
     */
    @Override
    public ConnectorPartitionService getPartitionService() {
        ConnectorPartitionService service = delegate.getPartitionService();
        if (rateLimiterEnabled) {
            logThrottledServiceCreation("partition");
            service = new ThrottlingConnectorPartitionService(service, rateLimiter);
        }
        return service;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getCatalogName() {
        return delegate.getCatalogName();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getCatalogShardName() {
        return delegate.getCatalogShardName();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void stop() {
        delegate.stop();
    }

    /**
     * Logs that a rate-limited wrapper is being created. Shared by all service getters so
     * the message format stays consistent.
     *
     * @param serviceKind the kind of service being wrapped, used only for logging
     */
    private void logThrottledServiceCreation(final String serviceKind) {
        log.info("Creating rate-limited connector {} services for connector-type: {}, "
                + "plugin-type: {}, catalog: {}, shard: {}",
            serviceKind, connectorContext.getConnectorType(), connectorPlugin.getType(),
            connectorContext.getCatalogName(), connectorContext.getCatalogShardName());
    }

    /**
     * Rate limiting is enabled unless the catalog explicitly opts out via the
     * {@code connector.rate-limiter-exempted} configuration key. A missing configuration
     * map means enabled.
     *
     * @return true if rate limiting should wrap this factory's services
     */
    private boolean isRateLimiterEnabled() {
        if (connectorContext.getConfiguration() == null) {
            return true;
        }
        return !Boolean.parseBoolean(
            connectorContext.getConfiguration().getOrDefault("connector.rate-limiter-exempted", "false")
        );
    }
}
| 9,768 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorDatabaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import java.util.List;
/**
 * Interfaces for manipulating database information for this connector.
 *
 * @author tgianos
 * @since 1.0.0
 */
public interface ConnectorDatabaseService extends ConnectorBaseService<DatabaseInfo> {
    /**
     * Returns a list of view names for database identified by <code>name</code>.
     *
     * <p>The default implementation is unsupported; connectors that expose views
     * override this method.
     *
     * @param context      The request context
     * @param databaseName The name of the database under which to list resources of type <code>T</code>
     * @return A list of view qualified names
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<QualifiedName> listViewNames(
        final ConnectorRequestContext context,
        final QualifiedName databaseName
    ) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
}
| 9,769 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ThrottlingConnectorDatabaseService.java
|
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiter;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiterRequestContext;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
/**
 * Connector database service decorator that throttles calls based on the contextual
 * request name and the target resource. Not all APIs can be throttled since we may not
 * have a resource, but those are a small minority.
 */
@Slf4j
@RequiredArgsConstructor
public class ThrottlingConnectorDatabaseService implements ConnectorDatabaseService {
    /** The wrapped database service every call is forwarded to after the throttle check. */
    @Getter
    @NonNull
    private final ConnectorDatabaseService delegate;
    /** Rate limiter consulted before each delegated call. */
    @NonNull
    private final RateLimiter rateLimiter;

    /** {@inheritDoc} */
    @Override
    public void create(final ConnectorRequestContext context, final DatabaseInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.create(context, resource);
    }

    /** {@inheritDoc} */
    @Override
    public void update(final ConnectorRequestContext context, final DatabaseInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.update(context, resource);
    }

    /** {@inheritDoc} */
    @Override
    public void delete(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        delegate.delete(context, name);
    }

    /** {@inheritDoc} */
    @Override
    public DatabaseInfo get(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.get(context, name);
    }

    /** {@inheritDoc} */
    @Override
    @SuppressFBWarnings
    public boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.exists(context, name);
    }

    /** {@inheritDoc} */
    @Override
    public List<DatabaseInfo> list(final ConnectorRequestContext context, final QualifiedName name,
                                   @Nullable final QualifiedName prefix, @Nullable final Sort sort,
                                   @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.list(context, name, prefix, sort, pageable);
    }

    /** {@inheritDoc} */
    @Override
    public List<QualifiedName> listNames(final ConnectorRequestContext context, final QualifiedName name,
                                         @Nullable final QualifiedName prefix, @Nullable final Sort sort,
                                         @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.listNames(context, name, prefix, sort, pageable);
    }

    /** {@inheritDoc} Throttling is checked against the old (current) name. */
    @Override
    public void rename(final ConnectorRequestContext context, final QualifiedName oldName,
                       final QualifiedName newName) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), oldName);
        delegate.rename(context, oldName, newName);
    }

    /** {@inheritDoc} */
    @Override
    public List<QualifiedName> listViewNames(final ConnectorRequestContext context, final QualifiedName databaseName) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), databaseName);
        return delegate.listViewNames(context, databaseName);
    }

    /**
     * Rejects the call with {@link MetacatTooManyRequestsException} when the rate limiter
     * reports that the request/resource pair has exceeded its limit.
     *
     * @param requestName the name of the current request, taken from the request context
     * @param resource    the qualified name of the resource being accessed
     */
    private void checkThrottling(final String requestName, final QualifiedName resource) {
        if (rateLimiter.hasExceededRequestLimit(new RateLimiterRequestContext(requestName, resource))) {
            final String errorMsg = String.format("Too many requests for resource %s. Request: %s",
                resource, requestName);
            log.warn(errorMsg);
            throw new MetacatTooManyRequestsException(errorMsg);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>NOTE(review): equality is delegated to the wrapped service, so a wrapper may equal
     * its delegate while the delegate does not equal the wrapper (asymmetric) — confirm
     * this is intended.
     */
    @Override
    public boolean equals(final Object o) {
        return delegate.equals(o);
    }

    /** {@inheritDoc} Delegated, consistent with the delegated {@code equals}. */
    @Override
    public int hashCode() {
        return delegate.hashCode();
    }
}
| 9,770 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorTypeConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import com.netflix.metacat.common.json.MetacatJsonLocator;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.MapType;
import com.netflix.metacat.common.type.ParametricType;
import com.netflix.metacat.common.type.RowType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeEnum;
import com.netflix.metacat.common.type.VarbinaryType;
import com.netflix.metacat.common.type.VarcharType;
/**
 * Canonical type converter class.
 *
 * @author tgianos
 * @author zhenl
 * @since 1.0.0
 */
public interface ConnectorTypeConverter {
    /**
     * Converts to metacat type.
     *
     * @param type type
     * @return metacat type
     */
    Type toMetacatType(String type);

    /**
     * Converts from metacat type.
     *
     * @param type type
     * @return connector type
     */
    String fromMetacatType(Type type);

    /**
     * Converts from Metacat type to JSON format.
     *
     * <p>Non-parametric types become a plain text node holding the connector's textual
     * rendering. Parametric types become an object node describing the base type plus its
     * parameters (precision/scale or length for scalars, key/value types for maps, field
     * list for rows, element type for arrays). Parametric bases not covered by any branch
     * below yield {@code null}.
     *
     * @param type type
     * @return Type in JSON format, or null if the parametric base is not handled
     */
    default JsonNode fromMetacatTypeToJson(Type type) {
        final MetacatJsonLocator json = new MetacatJsonLocator();
        JsonNode result = null;
        final TypeEnum base = type.getTypeSignature().getBase();
        if (!base.isParametricType()) {
            // Simple scalar type: just the connector's textual name.
            result = new TextNode(fromMetacatType(type));
        } else if (type instanceof DecimalType || type instanceof CharType
            || type instanceof VarcharType || type instanceof VarbinaryType) {
            // Parametric scalar: split the textual rendering, e.g. "decimal(10,2)", into
            // the base name plus explicit parameter fields.
            final ObjectNode node = json.emptyObjectNode();
            final String typeText = fromMetacatType(type);
            final int index = typeText.indexOf('(');
            if (index == -1) {
                // No parameter list in the textual form; keep the whole string as the type.
                node.put("type", typeText);
            } else {
                node.put("type", typeText.substring(0, index));
                if (type instanceof DecimalType) {
                    node.put("precision", ((DecimalType) type).getPrecision());
                    node.put("scale", ((DecimalType) type).getScale());
                } else if (type instanceof CharType) {
                    node.put("length", ((CharType) type).getLength());
                } else if (type instanceof VarcharType) {
                    node.put("length", ((VarcharType) type).getLength());
                } else {
                    // VarbinaryType is the only remaining possibility in this branch.
                    node.put("length", ((VarbinaryType) type).getLength());
                }
            }
            result = node;
        } else if (base.equals(TypeEnum.MAP)) {
            // Map type: recurse into key and value types.
            final MapType mapType = (MapType) type;
            final ObjectNode node = json.emptyObjectNode();
            node.put("type", TypeEnum.MAP.getType());
            node.set("keyType", fromMetacatTypeToJson(mapType.getKeyType()));
            node.set("valueType", fromMetacatTypeToJson(mapType.getValueType()));
            result = node;
        } else if (base.equals(TypeEnum.ROW)) {
            // Row (struct) type: emit a "fields" array of {name, type} objects, recursing
            // into each field's type.
            final RowType rowType = (RowType) type;
            final ObjectNode node = json.emptyObjectNode();
            final ArrayNode fieldsNode = node.arrayNode();
            rowType.getFields().forEach(f -> {
                final ObjectNode fieldNode = json.emptyObjectNode();
                fieldNode.put("name", f.getName());
                fieldNode.set("type", fromMetacatTypeToJson(f.getType()));
                fieldsNode.add(fieldNode);
            });
            node.put("type", TypeEnum.ROW.getType());
            node.set("fields", fieldsNode);
            result = node;
        } else if (base.equals(TypeEnum.ARRAY)) {
            // Array type: recurse into the single element-type parameter, if present.
            final ObjectNode node = json.emptyObjectNode();
            node.put("type", TypeEnum.ARRAY.getType());
            ((ParametricType) type).getParameters().stream().findFirst()
                .ifPresent(t -> node.set("elementType", fromMetacatTypeToJson(t)));
            result = node;
        }
        return result;
    }
}
| 9,771 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorBaseService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.model.BaseInfo;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javax.annotation.Nullable;
import java.util.List;
/**
 * Generic interface for methods pertaining to resources from connectors such as Databases, Tables and Partitions.
 *
 * <p>All methods default to throwing {@link UnsupportedOperationException}; connectors
 * override only the operations they support.
 *
 * @param <T> The Type of resource this interface works for
 * @author tgianos
 * @since 1.0.0
 */
public interface ConnectorBaseService<T extends BaseInfo> {
    /**
     * Standard error message for all default implementations.
     */
    String UNSUPPORTED_MESSAGE = "Not implemented for this connector";

    /**
     * Create a resource.
     *
     * @param context  The request context
     * @param resource The resource metadata
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default void create(final ConnectorRequestContext context, final T resource) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Update a resource with the given metadata.
     *
     * @param context  The request context
     * @param resource resource metadata
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default void update(final ConnectorRequestContext context, final T resource) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Delete a database with the given qualified name.
     *
     * @param context The request context
     * @param name    The qualified name of the resource to delete
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default void delete(final ConnectorRequestContext context, final QualifiedName name) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Return a resource with the given name.
     *
     * @param context The request context
     * @param name    The qualified name of the resource to get
     * @return The resource metadata.
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default T get(final ConnectorRequestContext context, final QualifiedName name) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Return true, if the resource exists.
     *
     * <p>The default implementation delegates to {@link #get} and treats a thrown
     * {@link NotFoundException} as "does not exist"; other exceptions (including
     * {@link UnsupportedOperationException} from an unimplemented {@code get}) propagate.
     *
     * @param context The request context
     * @param name    The qualified name of the resource to get
     * @return Return true, if the resource exists.
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    @SuppressFBWarnings
    default boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
        boolean result = false;
        try {
            result = get(context, name) != null;
        } catch (NotFoundException ignored) {
            // name does not exist; report absence rather than propagating.
        }
        return result;
    }

    /**
     * Get a list of all the resources under the given resource identified by <code>name</code>. Optionally sort by
     * <code>sort</code> and add pagination via <code>pageable</code>.
     *
     * @param context  The request context
     * @param name     The name of the resource under which to list resources of type <code>T</code>
     * @param prefix   The optional prefix to apply to filter resources for listing
     * @param sort     Optional sorting parameters
     * @param pageable Optional paging parameters
     * @return A list of type <code>T</code> resources in the desired order if required
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<T> list(
        final ConnectorRequestContext context,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns a list of qualified names of resources under the resource identified by <code>name</code>.
     *
     * @param context  The request context
     * @param name     The name of the resource under which to list resources of type <code>T</code>
     * @param prefix   The optional prefix to apply to filter resources for listing
     * @param sort     Optional sorting parameters
     * @param pageable Optional paging parameters
     * @return A list of Qualified Names of resources in the desired order if required
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<QualifiedName> listNames(
        final ConnectorRequestContext context,
        final QualifiedName name,
        @Nullable final QualifiedName prefix,
        @Nullable final Sort sort,
        @Nullable final Pageable pageable
    ) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Rename the specified resource.
     *
     * @param context The metacat request context
     * @param oldName The current resource name
     * @param newName The new resource name
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default void rename(
        final ConnectorRequestContext context,
        final QualifiedName oldName,
        final QualifiedName newName
    ) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
}
| 9,772 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorCatalogService.java
|
/*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.server.connectors.model.CatalogInfo;
/**
 * Interfaces for manipulating catalog information for this connector. All operations are
 * inherited from {@link ConnectorBaseService}; this interface only fixes the resource
 * type to {@link CatalogInfo}.
 *
 * @author rveeramacheneni
 * @since 1.3.0
 */
public interface ConnectorCatalogService extends ConnectorBaseService<CatalogInfo> {
}
| 9,773 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorUtils.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import javax.annotation.Nullable;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Utility methods shared by all Connectors.
*
* @author tgianos
* @since 1.0.0
*/
public class ConnectorUtils {
/**
* The key which a user can set a value in a catalog to override the default database service class.
*/
private static final String DATABASE_SERVICE_CLASS_KEY = "metacat.connector.databaseService.class";
/**
* The key which a user can set a value in a catalog to override the default table service class.
*/
private static final String TABLE_SERVICE_CLASS_KEY = "metacat.connector.tableService.class";
/**
* The key which a user can set a value in a catalog to override the default partition service class.
*/
private static final String PARTITION_SERVICE_CLASS_KEY = "metacat.connector.partitionService.class";
/**
* Protected constructor for utility class.
*/
protected ConnectorUtils() {
}
/**
* Sort the Qualified Names using the comparator in the desired order.
*
* @param <T> The type of elements to sort
* @param elements The list to sort
* @param sort The sort object defining ascending or descending order
* @param comparator The comparator to use
*/
public static <T> void sort(
final List<T> elements,
final Sort sort,
final Comparator<T> comparator
) {
switch (sort.getOrder()) {
case DESC:
elements.sort(comparator.reversed());
break;
case ASC:
default:
elements.sort(comparator);
}
}
/**
* If the user desires pagination this method will take the list and break it up into the correct chunk. If not it
* will return the whole list.
*
* @param <T> The type of elements to paginate
* @param elements The elements to paginate
* @param pageable The pagination parameters or null if no pagination required
* @return The final list of qualified names
*/
public static <T> List<T> paginate(
final List<T> elements,
@Nullable final Pageable pageable
) {
final ImmutableList.Builder<T> results = ImmutableList.builder();
if (pageable != null && pageable.isPageable()) {
results.addAll(
elements
.stream()
.skip(pageable.getOffset())
.limit(pageable.getLimit())
.collect(Collectors.toList())
);
} else {
results.addAll(elements);
}
return results.build();
}
/**
* Get the database service class to use.
*
* @param configuration The connector configuration
* @param defaultServiceClass The default class to use if an override is not found
* @return The database service class to use.
*/
public static Class<? extends ConnectorDatabaseService> getDatabaseServiceClass(
final Map<String, String> configuration,
final Class<? extends ConnectorDatabaseService> defaultServiceClass
) {
if (configuration.containsKey(DATABASE_SERVICE_CLASS_KEY)) {
final String className = configuration.get(DATABASE_SERVICE_CLASS_KEY);
return getServiceClass(className, ConnectorDatabaseService.class);
} else {
return defaultServiceClass;
}
}
/**
* Get the table service class to use.
*
* @param configuration The connector configuration
* @param defaultServiceClass The default class to use if an override is not found
* @return The table service class to use.
*/
public static Class<? extends ConnectorTableService> getTableServiceClass(
final Map<String, String> configuration,
final Class<? extends ConnectorTableService> defaultServiceClass
) {
if (configuration.containsKey(TABLE_SERVICE_CLASS_KEY)) {
final String className = configuration.get(TABLE_SERVICE_CLASS_KEY);
return getServiceClass(className, ConnectorTableService.class);
} else {
return defaultServiceClass;
}
}
/**
* Get the partition service class to use.
*
* @param configuration The connector configuration
* @param defaultServiceClass The default class to use if an override is not found
* @return The partition service class to use.
*/
public static Class<? extends ConnectorPartitionService> getPartitionServiceClass(
    final Map<String, String> configuration,
    final Class<? extends ConnectorPartitionService> defaultServiceClass
) {
    // Fall back to the supplied default unless the catalog configuration
    // explicitly names an override implementation class.
    if (!configuration.containsKey(PARTITION_SERVICE_CLASS_KEY)) {
        return defaultServiceClass;
    }
    return getServiceClass(configuration.get(PARTITION_SERVICE_CLASS_KEY), ConnectorPartitionService.class);
}
/**
 * Loads the named class and checks that it is a subtype of the given base service class.
 *
 * @param className the fully qualified name of the service implementation class
 * @param baseClass the connector service interface the class must implement
 * @return the loaded class, typed as a subclass of {@code baseClass}
 * @throws RuntimeException   if the class cannot be found on the classpath
 * @throws ClassCastException if the loaded class is not a subtype of {@code baseClass}
 */
private static <S extends ConnectorBaseService> Class<? extends S> getServiceClass(
    final String className,
    final Class<? extends S> baseClass
) {
    try {
        return Class.forName(className).asSubclass(baseClass);
    } catch (final ClassNotFoundException cnfe) {
        // Throwables.propagate is deprecated in Guava; for a checked, non-Error throwable
        // it did exactly this — wrap in RuntimeException — so behavior is unchanged.
        throw new RuntimeException(cnfe);
    }
}
}
| 9,774 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/DefaultConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* Common connector factory with repeatable functionality.
*
* @author tgianos
* @since 1.0.0
*/
@Slf4j
@Getter
public class DefaultConnectorFactory implements ConnectorFactory {

    private final String catalogName;
    private final String catalogShardName;
    private final Injector injector;

    /**
     * Builds a factory whose connector services are resolved from a Guice injector
     * created out of the given modules.
     *
     * @param catalogName      catalog name
     * @param catalogShardName catalog shard name
     * @param modules          the Guice modules that bind this connector's services
     */
    public DefaultConnectorFactory(
        final String catalogName,
        final String catalogShardName,
        final Iterable<? extends Module> modules
    ) {
        log.info("Creating connector factory for catalog {}", catalogName);
        this.catalogName = catalogName;
        this.catalogShardName = catalogShardName;
        this.injector = Guice.createInjector(modules);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorDatabaseService getDatabaseService() {
        return lookup(ConnectorDatabaseService.class);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorTableService getTableService() {
        return lookup(ConnectorTableService.class);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ConnectorPartitionService getPartitionService() {
        return lookup(ConnectorPartitionService.class);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void stop() {
        // Intentionally a no-op.
    }

    /**
     * Resolves a connector service implementation from the injector.
     *
     * @param serviceClass the service interface to resolve
     * @return the bound implementation
     * @throws UnsupportedOperationException if no implementation is available
     */
    private <T extends ConnectorBaseService> T lookup(final Class<T> serviceClass) {
        final T service = this.injector.getInstance(serviceClass);
        if (service == null) {
            throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
        }
        return service;
    }
}
| 9,775 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorRequestContext.java
|
/*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* The context of the request to Metacat.
*
* @author amajumdar
* @author tgianos
* @since 1.0.0
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class ConnectorRequestContext {
    // Request start time — presumably epoch milliseconds; TODO confirm with callers.
    private long timestamp;
    // Name of the user on whose behalf the request is made.
    private String userName;
    // NOTE(review): flag presumably asks the connector to include extra metadata in
    // its response — confirm exact scope against connector implementations.
    private boolean includeMetadata;
    // NOTE(review): flag presumably restricts the response to the metadata location
    // only — confirm semantics against connector implementations.
    private boolean includeMetadataLocationOnly;
    //TODO: Move this to a response object.
    private boolean ignoreErrorsAfterUpdate;
}
| 9,776 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorPartitionService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import java.util.List;
import java.util.Map;
/**
* Interfaces for manipulating partition information for this connector.
*
* @author tgianos
* @since 1.0.0
*/
public interface ConnectorPartitionService extends ConnectorBaseService<PartitionInfo> {
    // Every operation below defaults to throwing UnsupportedOperationException, so a
    // connector implementation only needs to override the operations it supports.
    /**
     * Gets the Partitions based on a filter expression for the specified table.
     *
     * @param context The Metacat request context
     * @param table table handle to get partition for
     * @param partitionsRequest The metadata for what kind of partitions to get from the table
     * @param tableInfo Table info object
     * @return filtered list of partitions
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<PartitionInfo> getPartitions(
        final ConnectorRequestContext context,
        final QualifiedName table,
        final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
    /**
     * Add/Update/delete partitions for a table.
     *
     * @param context The Metacat request context
     * @param table table handle to get partition for
     * @param partitionsSaveRequest Partitions to save, alter or delete
     * @return added/updated list of partition names
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default PartitionsSaveResponse savePartitions(
        final ConnectorRequestContext context,
        final QualifiedName table,
        final PartitionsSaveRequest partitionsSaveRequest
    ) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
    /**
     * Delete partitions for a table.
     *
     * @param context The Metacat request context
     * @param tableName table name
     * @param partitionNames list of partition names
     * @param tableInfo table info object
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default void deletePartitions(
        final ConnectorRequestContext context,
        final QualifiedName tableName,
        final List<String> partitionNames,
        final TableInfo tableInfo
    ) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
    /**
     * Number of partitions for the given table.
     *
     * @param context The Metacat request context
     * @param table table handle
     * @param tableInfo table info object
     * @return Number of partitions
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default int getPartitionCount(
        final ConnectorRequestContext context,
        final QualifiedName table,
        final TableInfo tableInfo
    ) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
    /**
     * Returns all the partition names referring to the given <code>uris</code>.
     *
     * @param context The Metacat request context
     * @param uris locations
     * @param prefixSearch if true, we look for tables whose location starts with the given <code>uri</code>
     * @return map of uri to list of partition names
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default Map<String, List<QualifiedName>> getPartitionNames(
        final ConnectorRequestContext context,
        final List<String> uris,
        final boolean prefixSearch
    ) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
    /**
     * Gets the partition names/keys based on a filter expression for the specified table.
     *
     * @param context The Metacat request context
     * @param table table handle to get partition for
     * @param partitionsRequest The metadata for what kind of partitions to get from the table
     * @param tableInfo table info object
     * @return filtered list of partition names
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<String> getPartitionKeys(
        final ConnectorRequestContext context,
        final QualifiedName table,
        final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
    /**
     * Gets the partition uris based on a filter expression for the specified table.
     *
     * @param context The Metacat request context
     * @param table table handle to get partition for
     * @param partitionsRequest The metadata for what kind of partitions to get from the table
     * @param tableInfo table info object
     * @return filtered list of partition uris
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<String> getPartitionUris(
        final ConnectorRequestContext context,
        final QualifiedName table,
        final PartitionListRequest partitionsRequest,
        final TableInfo tableInfo
    ) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
}
| 9,777 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ThrottlingConnectorCatalogService.java
|
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiter;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiterRequestContext;
import com.netflix.metacat.common.server.connectors.model.CatalogInfo;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
/**
 * Connector that throttles calls to the connector based on the contextual request name
 * and the resource. Not all APIs can be throttled since we may not have a resource,
 * but those are a small minority.
 */
@Slf4j
@RequiredArgsConstructor
public class ThrottlingConnectorCatalogService implements ConnectorCatalogService {
    /** The underlying catalog service every call is forwarded to after the rate-limit check. */
    @Getter
    @NonNull
    private final ConnectorCatalogService delegate;
    /** Decides whether a given (request name, resource) pair has exceeded its request budget. */
    @NonNull
    private final RateLimiter rateLimiter;
    @Override
    public void create(final ConnectorRequestContext context, final CatalogInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.create(context, resource);
    }
    @Override
    public void update(final ConnectorRequestContext context, final CatalogInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.update(context, resource);
    }
    @Override
    public void delete(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        delegate.delete(context, name);
    }
    @Override
    public CatalogInfo get(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.get(context, name);
    }
    @Override
    @SuppressFBWarnings
    public boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.exists(context, name);
    }
    @Override
    public List<CatalogInfo> list(final ConnectorRequestContext context, final QualifiedName name,
                                  @Nullable final QualifiedName prefix,
                                  @Nullable final Sort sort, @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.list(context, name, prefix, sort, pageable);
    }
    @Override
    public List<QualifiedName> listNames(final ConnectorRequestContext context, final QualifiedName name,
                                         @Nullable final QualifiedName prefix,
                                         @Nullable final Sort sort, @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.listNames(context, name, prefix, sort, pageable);
    }
    @Override
    public void rename(final ConnectorRequestContext context, final QualifiedName oldName,
                       final QualifiedName newName) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), oldName);
        delegate.rename(context, oldName, newName);
    }
    /**
     * Throws {@link MetacatTooManyRequestsException} when the rate limiter reports that
     * the given request has exceeded its limit for the given resource.
     *
     * @param requestName name of the in-flight Metacat request (from the context manager)
     * @param resource    qualified name of the resource being accessed
     */
    private void checkThrottling(final String requestName, final QualifiedName resource) {
        if (rateLimiter.hasExceededRequestLimit(new RateLimiterRequestContext(requestName, resource))) {
            final String errorMsg = String.format("Too many requests for resource %s. Request: %s",
                resource, requestName);
            log.warn(errorMsg);
            throw new MetacatTooManyRequestsException(errorMsg);
        }
    }
    // NOTE(review): equality/hashing are delegated to the wrapped service, so the wrapper is
    // interchangeable with its delegate in hash-based collections; however wrapper.equals(delegate)
    // is asymmetric with delegate.equals(wrapper) — confirm this is intentional.
    @Override
    public boolean equals(final Object o) {
        return delegate.equals(o);
    }
    @Override
    public int hashCode() {
        return delegate.hashCode();
    }
}
| 9,778 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ThrottlingConnectorTableService.java
|
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiter;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiterRequestContext;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
/**
 * Connector that throttles calls to the connector based on the contextual request name
 * and the resource. Not all APIs can be throttled since we may not have a resource,
 * but those are a small minority.
 */
@Slf4j
@RequiredArgsConstructor
public class ThrottlingConnectorTableService implements ConnectorTableService {
    /** The underlying table service every call is forwarded to after the rate-limit check. */
    @Getter
    @NonNull
    private final ConnectorTableService delegate;
    /** Decides whether a given (request name, resource) pair has exceeded its request budget. */
    @NonNull
    private final RateLimiter rateLimiter;
    @Override
    public TableInfo get(final ConnectorRequestContext context,
                         final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.get(context, name);
    }
    // No throttling here: the lookup is keyed by URI, so there is no single
    // resource name to rate-limit against (see class comment).
    @Override
    public Map<String, List<QualifiedName>> getTableNames(final ConnectorRequestContext context,
                                                          final List<String> uris,
                                                          final boolean prefixSearch) {
        return delegate.getTableNames(context, uris, prefixSearch);
    }
    @Override
    public List<QualifiedName> getTableNames(final ConnectorRequestContext context,
                                             final QualifiedName name,
                                             final String filter,
                                             final Integer limit) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.getTableNames(context, name, filter, limit);
    }
    @Override
    public void create(final ConnectorRequestContext context, final TableInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.create(context, resource);
    }
    @Override
    public void update(final ConnectorRequestContext context, final TableInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.update(context, resource);
    }
    @Override
    public void delete(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        delegate.delete(context, name);
    }
    @Override
    @SuppressFBWarnings
    public boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.exists(context, name);
    }
    @Override
    public List<TableInfo> list(final ConnectorRequestContext context, final QualifiedName name,
                                @Nullable final QualifiedName prefix,
                                @Nullable final Sort sort, @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.list(context, name, prefix, sort, pageable);
    }
    @Override
    public List<QualifiedName> listNames(final ConnectorRequestContext context, final QualifiedName name,
                                         @Nullable final QualifiedName prefix,
                                         @Nullable final Sort sort, @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.listNames(context, name, prefix, sort, pageable);
    }
    @Override
    public void rename(final ConnectorRequestContext context,
                       final QualifiedName oldName,
                       final QualifiedName newName) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), oldName);
        delegate.rename(context, oldName, newName);
    }
    /**
     * Throws {@link MetacatTooManyRequestsException} when the rate limiter reports that
     * the given request has exceeded its limit for the given resource.
     *
     * @param requestName name of the in-flight Metacat request (from the context manager)
     * @param resource    qualified name of the resource being accessed
     */
    private void checkThrottling(final String requestName, final QualifiedName resource) {
        if (rateLimiter.hasExceededRequestLimit(new RateLimiterRequestContext(requestName, resource))) {
            final String errorMsg = String.format("Too many requests for resource %s. Request: %s",
                resource, requestName);
            log.warn(errorMsg);
            throw new MetacatTooManyRequestsException(errorMsg);
        }
    }
    // NOTE(review): equality/hashing are delegated to the wrapped service, so the wrapper is
    // interchangeable with its delegate in hash-based collections; however wrapper.equals(delegate)
    // is asymmetric with delegate.equals(wrapper) — confirm this is intentional.
    @Override
    public boolean equals(final Object o) {
        return delegate.equals(o);
    }
    @Override
    public int hashCode() {
        return delegate.hashCode();
    }
}
| 9,779 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorInfoConverter.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
/**
* Converter that converts Metacat dtos to connector represented types and vice versa.
*
* @param <D> Connector database type
* @param <T> Connector table type
* @param <P> Connector partition type
* @author amajumdar
* @since 1.0.0
*/
public interface ConnectorInfoConverter<D, T, P> {
    // Every conversion defaults to throwing UnsupportedOperationException, so a
    // connector only needs to override the conversions it actually supports.
    /**
     * Standard error message for all default implementations.
     */
    String UNSUPPORTED_MESSAGE = "Not supported by this connector";
    /**
     * Converts to DatabaseDto.
     *
     * @param qualifiedName qualifiedName
     * @param database connector database
     * @return Metacat database dto
     * @throws UnsupportedOperationException by default, when the connector does not support this conversion
     */
    default DatabaseInfo toDatabaseInfo(final QualifiedName qualifiedName, final D database) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
    /**
     * Converts from DatabaseDto to the connector database.
     *
     * @param database Metacat database dto
     * @return connector database
     * @throws UnsupportedOperationException by default, when the connector does not support this conversion
     */
    default D fromDatabaseInfo(final DatabaseInfo database) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
    /**
     * Converts to TableDto.
     *
     * @param qualifiedName qualifiedName
     * @param table connector table
     * @return Metacat table dto
     * @throws UnsupportedOperationException by default, when the connector does not support this conversion
     */
    default TableInfo toTableInfo(final QualifiedName qualifiedName, final T table) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
    /**
     * Converts from TableDto to the connector table.
     *
     * @param table Metacat table dto
     * @return connector table
     * @throws UnsupportedOperationException by default, when the connector does not support this conversion
     */
    default T fromTableInfo(final TableInfo table) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
    /**
     * Converts to PartitionDto.
     *
     * @param tableInfo tableInfo
     * @param partition connector partition
     * @return Metacat partition dto
     * @throws UnsupportedOperationException by default, when the connector does not support this conversion
     */
    default PartitionInfo toPartitionInfo(final TableInfo tableInfo, final P partition) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
    /**
     * Converts from PartitionDto to the connector partition.
     *
     * @param tableInfo tableInfo
     * @param partition Metacat partition dto
     * @return connector partition
     * @throws UnsupportedOperationException by default, when the connector does not support this conversion
     */
    default P fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partition) {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }
}
| 9,780 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorContext.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.spectator.api.Registry;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import org.springframework.context.ApplicationContext;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* Connector Config.
*/
@AllArgsConstructor(access = AccessLevel.PRIVATE)
@Builder
@Data
public class ConnectorContext {
    /**
     * Catalog name.
     */
    private final String catalogName;
    /**
     * Catalog shard name.
     */
    private final String catalogShardName;
    /**
     * Connector type.
     */
    private final String connectorType;
    /**
     * Metacat config.
     */
    private final Config config;
    /**
     * The registry for spectator.
     */
    private final Registry registry;
    /**
     * Main application context.
     */
    private final ApplicationContext applicationContext;
    /**
     * Metacat catalog configuration.
     */
    private final Map<String, String> configuration;
    /**
     * Nested connector contexts.
     */
    private final List<ConnectorContext> nestedConnectorContexts;
    /**
     * Convenience constructor for a context without nested connector contexts
     * (they default to an immutable empty list).
     *
     * @param catalogName the catalog name.
     * @param catalogShardName the catalog shard name
     * @param connectorType the connector type.
     * @param config the application config.
     * @param registry the registry.
     * @param applicationContext the application context.
     * @param configuration the connector properties.
     */
    public ConnectorContext(final String catalogName, final String catalogShardName, final String connectorType,
                            final Config config, final Registry registry,
                            final ApplicationContext applicationContext, final Map<String, String> configuration) {
        this(catalogName, catalogShardName, connectorType, config, registry,
            applicationContext, configuration, Collections.emptyList());
    }
}
| 9,781 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ThrottlingConnectorPartitionService.java
|
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatTooManyRequestsException;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiter;
import com.netflix.metacat.common.server.api.ratelimiter.RateLimiterRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Getter;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
/**
 * Connector that throttles calls to the connector based on the contextual request name
 * and the resource. Not all APIs can be throttled since we may not have a resource,
 * but those are a small minority.
 */
@Slf4j
@RequiredArgsConstructor
public class ThrottlingConnectorPartitionService implements ConnectorPartitionService {
    /** The underlying partition service every call is forwarded to after the rate-limit check. */
    @Getter
    @NonNull
    private final ConnectorPartitionService delegate;
    /** Decides whether a given (request name, resource) pair has exceeded its request budget. */
    @NonNull
    private final RateLimiter rateLimiter;
    @Override
    public List<PartitionInfo> getPartitions(final ConnectorRequestContext context,
                                             final QualifiedName table,
                                             final PartitionListRequest partitionsRequest,
                                             final TableInfo tableInfo) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), table);
        return delegate.getPartitions(context, table, partitionsRequest, tableInfo);
    }
    @Override
    public PartitionsSaveResponse savePartitions(final ConnectorRequestContext context,
                                                 final QualifiedName table,
                                                 final PartitionsSaveRequest partitionsSaveRequest) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), table);
        return delegate.savePartitions(context, table, partitionsSaveRequest);
    }
    @Override
    public void deletePartitions(final ConnectorRequestContext context,
                                 final QualifiedName tableName,
                                 final List<String> partitionNames,
                                 final TableInfo tableInfo) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), tableName);
        delegate.deletePartitions(context, tableName, partitionNames, tableInfo);
    }
    @Override
    public int getPartitionCount(final ConnectorRequestContext context,
                                 final QualifiedName table,
                                 final TableInfo tableInfo) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), table);
        return delegate.getPartitionCount(context, table, tableInfo);
    }
    // No throttling here: the lookup is keyed by URI, so there is no single
    // resource name to rate-limit against (see class comment).
    @Override
    public Map<String, List<QualifiedName>> getPartitionNames(final ConnectorRequestContext context,
                                                              final List<String> uris,
                                                              final boolean prefixSearch) {
        return delegate.getPartitionNames(context, uris, prefixSearch);
    }
    @Override
    public List<String> getPartitionKeys(final ConnectorRequestContext context,
                                         final QualifiedName table,
                                         final PartitionListRequest partitionsRequest,
                                         final TableInfo tableInfo) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), table);
        return delegate.getPartitionKeys(context, table, partitionsRequest, tableInfo);
    }
    @Override
    public List<String> getPartitionUris(final ConnectorRequestContext context,
                                         final QualifiedName table,
                                         final PartitionListRequest partitionsRequest,
                                         final TableInfo tableInfo) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), table);
        return delegate.getPartitionUris(context, table, partitionsRequest, tableInfo);
    }
    @Override
    public void create(final ConnectorRequestContext context, final PartitionInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.create(context, resource);
    }
    @Override
    public void update(final ConnectorRequestContext context, final PartitionInfo resource) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), resource.getName());
        delegate.update(context, resource);
    }
    @Override
    public void delete(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        delegate.delete(context, name);
    }
    @Override
    public PartitionInfo get(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.get(context, name);
    }
    @Override
    @SuppressFBWarnings
    public boolean exists(final ConnectorRequestContext context, final QualifiedName name) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.exists(context, name);
    }
    @Override
    public List<PartitionInfo> list(final ConnectorRequestContext context, final QualifiedName name,
                                    @Nullable final QualifiedName prefix, @Nullable final Sort sort,
                                    @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.list(context, name, prefix, sort, pageable);
    }
    @Override
    public List<QualifiedName> listNames(final ConnectorRequestContext context, final QualifiedName name,
                                         @Nullable final QualifiedName prefix,
                                         @Nullable final Sort sort, @Nullable final Pageable pageable) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), name);
        return delegate.listNames(context, name, prefix, sort, pageable);
    }
    @Override
    public void rename(final ConnectorRequestContext context, final QualifiedName oldName,
                       final QualifiedName newName) {
        checkThrottling(MetacatContextManager.getContext().getRequestName(), oldName);
        delegate.rename(context, oldName, newName);
    }
    /**
     * Throws {@link MetacatTooManyRequestsException} when the rate limiter reports that
     * the given request has exceeded its limit for the given resource.
     *
     * @param requestName name of the in-flight Metacat request (from the context manager)
     * @param resource    qualified name of the resource being accessed
     */
    private void checkThrottling(final String requestName, final QualifiedName resource) {
        if (rateLimiter.hasExceededRequestLimit(new RateLimiterRequestContext(requestName, resource))) {
            final String errorMsg = String.format("Too many requests for resource %s. Request: %s",
                resource, requestName);
            log.warn(errorMsg);
            throw new MetacatTooManyRequestsException(errorMsg);
        }
    }
    // NOTE(review): equality/hashing are delegated to the wrapped service, so the wrapper is
    // interchangeable with its delegate in hash-based collections; however wrapper.equals(delegate)
    // is asymmetric with delegate.equals(wrapper) — confirm this is intentional.
    @Override
    public boolean equals(final Object o) {
        return delegate.equals(o);
    }
    @Override
    public int hashCode() {
        return delegate.hashCode();
    }
}
| 9,782 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorFactory.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
/**
 * Factory that returns the connector implementations of the service and converter interfaces.
 *
 * <p>All service accessors have default implementations that throw
 * {@link UnsupportedOperationException}; a connector overrides only the services it supports.
 *
 * @author amajumdar
 * @since 1.0.0
 */
public interface ConnectorFactory {
    /**
     * Standard error message for all default implementations.
     */
    String UNSUPPORTED_MESSAGE = "Not supported by this connector";

    /**
     * Returns the catalog service implementation of the connector.
     *
     * @return Returns the catalog service implementation of the connector.
     * @throws UnsupportedOperationException if the connector does not provide a catalog service
     */
    default ConnectorCatalogService getCatalogService() {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns the database service implementation of the connector.
     *
     * @return Returns the database service implementation of the connector.
     * @throws UnsupportedOperationException if the connector does not provide a database service
     */
    default ConnectorDatabaseService getDatabaseService() {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns the table service implementation of the connector.
     *
     * @return Returns the table service implementation of the connector.
     * @throws UnsupportedOperationException if the connector does not provide a table service
     */
    default ConnectorTableService getTableService() {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns the partition service implementation of the connector.
     *
     * @return Returns the partition service implementation of the connector.
     * @throws UnsupportedOperationException if the connector does not provide a partition service
     */
    default ConnectorPartitionService getPartitionService() {
        throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns the name of the catalog.
     *
     * @return Returns the name of the catalog.
     */
    String getCatalogName();

    /**
     * Returns the name of the catalog shard.
     *
     * @return Returns the name of the catalog shard.
     */
    String getCatalogShardName();

    /**
     * Shuts down the factory.
     */
    void stop();
}
| 9,783 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes and interfaces related to SPI (Service Provider Interface) for catalog connectors.
*
* @author tgianos
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.server.connectors;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,784 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/ConnectorTableService.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import java.util.List;
import java.util.Map;
/**
 * Service interface for connector to implement and expose Table related metadata.
 *
 * <p>Both methods are optional for connectors; the defaults throw
 * {@link UnsupportedOperationException}.
 *
 * @author tgianos
 * @since 1.0.0
 */
public interface ConnectorTableService extends ConnectorBaseService<TableInfo> {
    /**
     * Returns all the table names referring to the given <code>uris</code>.
     *
     * @param context      The Metacat request context
     * @param uris         locations
     * @param prefixSearch if true, we look for tables whose location starts with the given <code>uri</code>
     * @return map of uri to list of table names
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default Map<String, List<QualifiedName>> getTableNames(
        final ConnectorRequestContext context,
        final List<String> uris,
        final boolean prefixSearch
    ) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }

    /**
     * Returns a filtered list of table names.
     *
     * @param context The Metacat request context
     * @param name    qualified name of either the catalog or database
     * @param filter  filter expression
     * @param limit   size of the list (NOTE(review): boxed Integer suggests null may mean
     *                "no limit" — confirm with implementations)
     * @return list of table names
     * @throws UnsupportedOperationException If the connector doesn't implement this method
     */
    default List<QualifiedName> getTableNames(
        final ConnectorRequestContext context,
        final QualifiedName name,
        final String filter,
        final Integer limit) {
        throw new UnsupportedOperationException(ConnectorBaseService.UNSUPPORTED_MESSAGE);
    }
}
| 9,785 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/util/TimeUtil.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.util;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
/**
 * Utilities for parsing human-readable time values such as {@code "30s"},
 * {@code "2 days"} or {@code "100ms"} into {@link TimeUnit} quantities.
 *
 * @author zhenl
 * @since 1.0.0
 */
public final class TimeUtil {

    private TimeUtil() {
        // Utility class; no instances.
    }

    /**
     * Resolves a unit suffix to a {@link TimeUnit}.
     *
     * <p>An empty suffix, or a bare {@code "l"} (a long-literal suffix such as in
     * {@code "100l"}), resolves to {@code defaultUnit}.
     *
     * @param inputUnit   the unit suffix, e.g. {@code "d"}, {@code "hours"}, {@code "ms"}
     * @param defaultUnit unit to use when no suffix is present; may be {@code null}
     * @return the resolved time unit
     * @throws IllegalArgumentException if the suffix is unknown, or if it is absent and
     *                                  {@code defaultUnit} is {@code null}
     */
    public static TimeUnit unitFor(final String inputUnit, final TimeUnit defaultUnit) {
        // Locale.ROOT keeps the match locale-independent (e.g. avoids the Turkish dotless-i
        // problem when lowercasing "MIN"/"HOURS").
        final String unit = inputUnit.trim().toLowerCase(Locale.ROOT);
        if (unit.isEmpty() || unit.equals("l")) {
            if (defaultUnit == null) {
                throw new IllegalArgumentException("Time unit is not specified");
            }
            return defaultUnit;
        } else if (unit.equals("d") || unit.startsWith("day")) {
            return TimeUnit.DAYS;
        } else if (unit.equals("h") || unit.startsWith("hour")) {
            return TimeUnit.HOURS;
        } else if (unit.equals("m") || unit.startsWith("min")) {
            return TimeUnit.MINUTES;
        } else if (unit.equals("s") || unit.startsWith("sec")) {
            return TimeUnit.SECONDS;
        } else if (unit.equals("ms") || unit.startsWith("msec")) {
            return TimeUnit.MILLISECONDS;
        } else if (unit.equals("us") || unit.startsWith("usec")) {
            return TimeUnit.MICROSECONDS;
        } else if (unit.equals("ns") || unit.startsWith("nsec")) {
            return TimeUnit.NANOSECONDS;
        }
        throw new IllegalArgumentException("Invalid time unit " + unit);
    }

    /**
     * Parses a time value with an optional unit suffix and converts it to {@code outUnit}.
     *
     * @param value     numeric value optionally followed by a unit, e.g. {@code "2d"}, {@code "90 min"}
     * @param inputUnit unit assumed when {@code value} carries no suffix
     * @param outUnit   unit of the returned quantity
     * @return the parsed time converted to {@code outUnit}
     * @throws IllegalArgumentException if the unit is invalid or missing with no default
     * @throws NumberFormatException    if the numeric portion is not a valid long
     */
    public static long toTime(final String value, final TimeUnit inputUnit, final TimeUnit outUnit) {
        final String[] parsed = parseTime(value.trim());
        // parsed[0] holds the (possibly negative) digits only, so no further trimming is needed;
        // parsed[1] is the raw unit suffix, which may carry leading whitespace.
        return outUnit.convert(Long.parseLong(parsed[0]), unitFor(parsed[1].trim(), inputUnit));
    }

    /**
     * Splits a string into its leading signed-integer portion and the remaining suffix.
     */
    private static String[] parseTime(final String value) {
        final char[] chars = value.toCharArray();
        int i = 0;
        while (i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i]))) {
            i++;
        }
        return new String[]{value.substring(0, i), value.substring(i)};
    }
}
| 9,786 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/util/package-info.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Connector utils.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.common.server.connectors.util;
import javax.annotation.ParametersAreNonnullByDefault;
| 9,787 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/PartitionInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.metacat.common.QualifiedName;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * Partition DTO.
 *
 * @author amajumdar, zhenl
 * @since 1.0.0
 */
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = true)
@AllArgsConstructor
@NoArgsConstructor
public final class PartitionInfo extends BaseInfo {
    // Storage/serde information of the partition (location, formats, parameters).
    private StorageInfo serde;
    // Data metrics of the partition, populated from Iceberg.
    private ObjectNode dataMetrics;

    /**
     * Constructor.
     *
     * @param name        name of the partition
     * @param auditInfo   audit information of the partition
     * @param metadata    metadata of the partition.
     * @param serde       storage info of the partition
     * @param dataMetrics data metrics of the partition, populated from Iceberg
     */
    @Builder
    private PartitionInfo(
        final QualifiedName name,
        final AuditInfo auditInfo,
        final Map<String, String> metadata,
        final StorageInfo serde,
        final ObjectNode dataMetrics
    ) {
        super(name, auditInfo, metadata);
        this.serde = serde;
        this.dataMetrics = dataMetrics;
    }
}
| 9,788 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/FieldInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeRegistry;
import com.netflix.metacat.common.type.TypeSignature;
import io.swagger.annotations.ApiModel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import org.apache.commons.lang3.StringUtils;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
/**
 * Field DTO.
 *
 * <p>The {@code type} field is transient: it is written out as its display-name
 * signature (see {@code writeObject}) and re-resolved through the
 * {@link TypeRegistry} when read back.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@ApiModel(value = "Table field/column metadata")
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@NoArgsConstructor
@Builder
@AllArgsConstructor
public final class FieldInfo implements Serializable {
    private static final long serialVersionUID = -762764635047711004L;
    // Field comment/description.
    private String comment;
    // Field (column) name.
    private String name;
    // True if this field is a partition key.
    private boolean partitionKey;
    // Type string as expressed by the source — presumably the connector's native
    // type name. TODO(review): confirm against connector implementations.
    private String sourceType;
    // Canonical metacat type; transient and custom-serialized below.
    private transient Type type;
    private Boolean isNullable;
    private Integer size;
    private String defaultValue;
    private Boolean isSortKey;
    private Boolean isIndexKey;

    // Accessors are generated by Lombok's @Data annotation.

    // Serializes the transient type as its display-name signature (or null).
    private void writeObject(final ObjectOutputStream oos)
        throws IOException {
        oos.defaultWriteObject();
        oos.writeObject(type == null ? null : type.getDisplayName());
    }

    // Restores the transient type by parsing the serialized signature string and
    // resolving it through the shared TypeRegistry.
    private void readObject(final ObjectInputStream ois)
        throws ClassNotFoundException, IOException {
        ois.defaultReadObject();
        final Object oSignature = ois.readObject();
        if (oSignature != null) {
            final String signatureString = (String) oSignature;
            if (StringUtils.isNotBlank(signatureString)) {
                final TypeSignature signature = TypeSignature.parseTypeSignature(signatureString);
                this.setType((TypeRegistry.getTypeRegistry().getType(signature)));
            }
        }
    }
}
| 9,789 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/ClusterInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
 * Cluster information.
 *
 * <p>Describes the cluster backing a catalog; carried on {@code CatalogInfo}.
 *
 * @author amajumdar
 * @since 1.3.0
 */
@SuppressWarnings("unused")
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class ClusterInfo implements Serializable {
    private static final long serialVersionUID = -3119788564952124498L;
    /** Name of the cluster. */
    private String name;
    /** Type of the cluster. */
    private String type;
    /** Name of the account under which the cluster was created. Ex: "abc_test" */
    private String account;
    /** Id of Account under which the cluster was created. Ex: "abc_test" */
    private String accountId;
    /** Environment under which the cluster exists. Ex: "prod", "test" */
    private String env;
    /** Region in which the cluster exists. Ex: "us-east-1" */
    private String region;
}
| 9,790 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/StorageInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.io.Serializable;
import java.util.Map;
/**
 * Storage Info.
 *
 * <p>Describes how the data of a table or partition is stored. NOTE(review): the
 * field names mirror a Hive-style storage descriptor/serde — confirm semantics
 * against the individual connectors.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@Data
@EqualsAndHashCode(callSuper = false)
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class StorageInfo implements Serializable {
    private static final long serialVersionUID = -1261997541007723844L;
    // Input format class name used to read the data.
    private String inputFormat;
    // Output format class name used to write the data.
    private String outputFormat;
    // Owner of the storage location.
    private String owner;
    // Storage-level parameters.
    private Map<String, String> parameters;
    // Serde-specific parameters.
    private Map<String, String> serdeInfoParameters;
    // Serialization library (serde) class name.
    private String serializationLib;
    // Physical location (URI) of the data.
    private String uri;
}
| 9,791 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/PartitionListRequest.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Partition get request.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class PartitionListRequest {
    // Partition filter expression used to narrow the result.
    private String filter;
    // Explicit partition names to fetch.
    private List<String> partitionNames;
    // If true, partition details are included in the response; defaults to false.
    private Boolean includePartitionDetails = false;
    // Pagination of the result list.
    private Pageable pageable;
    // Sorting of the result list.
    private Sort sort;
    // If true, restrict the result to audit-only partitions; defaults to false.
    // NOTE(review): exact audit-only semantics are connector-defined — confirm.
    private Boolean includeAuditOnly = false;
}
| 9,792 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/CatalogInfo.java
|
package com.netflix.metacat.common.server.connectors.model;
import com.netflix.metacat.common.QualifiedName;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * Connector catalog information.
 *
 * @author rveeramacheneni
 * @since 1.3.0
 */
@Data
@NoArgsConstructor
@EqualsAndHashCode(callSuper = true)
public final class CatalogInfo extends BaseInfo {
    // Information about the cluster backing this catalog.
    private ClusterInfo clusterInfo;

    /**
     * Constructor.
     *
     * @param name        qualified name of the catalog
     * @param auditInfo   audit info
     * @param metadata    metadata properties
     * @param clusterInfo cluster information
     */
    @Builder
    private CatalogInfo(
        final QualifiedName name,
        final AuditInfo auditInfo,
        final Map<String, String> metadata,
        final ClusterInfo clusterInfo
    ) {
        super(name, auditInfo, metadata);
        this.clusterInfo = clusterInfo;
    }
}
| 9,793 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/PartitionsSaveRequest.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.util.List;
/**
 * Partition save request.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@Data
@EqualsAndHashCode(callSuper = false)
public class PartitionsSaveRequest {
    // List of partitions to add or update.
    private List<PartitionInfo> partitions;
    // List of partition ids/names for deletes
    private List<String> partitionIdsForDeletes;
    // If true, we check if partition exists and drop it before adding it back. If false, we do not check and just add.
    private Boolean checkIfExists = true;
    // If true, we alter if partition exists. If checkIfExists=false, then this is false too.
    private Boolean alterIfExists = false;
}
| 9,794 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/PartitionsSaveResponse.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.util.ArrayList;
import java.util.List;
/**
 * Partition save response.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@Data
@Builder
@AllArgsConstructor
@EqualsAndHashCode(callSuper = false)
public class PartitionsSaveResponse {
    /**
     * List of added partition names.
     */
    private List<String> added;
    /**
     * List of updated partition names.
     */
    private List<String> updated;

    /**
     * Default constructor. Initializes both lists to empty mutable lists so
     * callers can append to them directly.
     */
    public PartitionsSaveResponse() {
        added = new ArrayList<>();
        updated = new ArrayList<>();
    }
}
| 9,795 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/ViewInfo.java
|
/*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
 * Hive Virtual View Info.
 *
 * @author zhenl
 * @since 1.2.0
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode
public class ViewInfo implements Serializable {
    private static final long serialVersionUID = -7841464527538892424L;
    /* View definition text as originally authored. */
    private String viewOriginalText;
    /* View definition text with references fully expanded. */
    private String viewExpandedText;
}
| 9,796 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/TableInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import com.netflix.metacat.common.QualifiedName;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.List;
import java.util.Map;
/**
 * Table Info.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@SuppressWarnings("checkstyle:finalclass")
@Data
@EqualsAndHashCode(callSuper = true)
@NoArgsConstructor
public class TableInfo extends BaseInfo {
    // Columns of the table.
    private List<FieldInfo> fields;
    // Storage/serde information of the table.
    private StorageInfo serde;
    // Virtual-view information; only set for view tables.
    private ViewInfo view;

    /**
     * Constructor.
     *
     * @param name      name of the table
     * @param auditInfo audit information of the table
     * @param metadata  metadata of the table.
     * @param fields    list of columns
     * @param serde     storage informations
     * @param view      virtual view information, if the table is a view
     */
    @Builder
    private TableInfo(
        final QualifiedName name,
        final AuditInfo auditInfo,
        final Map<String, String> metadata,
        final List<FieldInfo> fields,
        final StorageInfo serde,
        final ViewInfo view
    ) {
        super(name, auditInfo, metadata);
        this.fields = fields;
        this.serde = serde;
        this.view = view;
    }
}
| 9,797 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/DatabaseInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import com.netflix.metacat.common.QualifiedName;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * Database information.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@Data
@NoArgsConstructor
@EqualsAndHashCode(callSuper = true)
public final class DatabaseInfo extends BaseInfo {
    /* location of the database */
    private String uri;

    /**
     * Constructor.
     *
     * @param name      name of the database
     * @param auditInfo audit information of the database
     * @param metadata  metadata of the database.
     * @param uri       location of the database
     */
    @Builder
    private DatabaseInfo(
        final QualifiedName name,
        final AuditInfo auditInfo,
        final Map<String, String> metadata,
        final String uri
    ) {
        super(name, auditInfo, metadata);
        this.uri = uri;
    }
}
| 9,798 |
0 |
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors
|
Create_ds/metacat/metacat-common-server/src/main/java/com/netflix/metacat/common/server/connectors/model/BaseInfo.java
|
/*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.common.server.connectors.model;
import com.netflix.metacat.common.QualifiedName;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
import java.util.Map;
/**
 * Base class for catalog resources.
 *
 * <p>Common state shared by catalog, database, table and partition DTOs.
 *
 * @author amajumdar
 * @since 1.0.0
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public abstract class BaseInfo implements Serializable {
    private static final long serialVersionUID = 284049639636194327L;
    /* Name of the resource */
    private QualifiedName name;
    /* Audit information of the resource */
    private AuditInfo audit;
    /* Metadata properties of the resource */
    private Map<String, String> metadata;
}
| 9,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.