Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/MessageId.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="110"
*/
@OpenWireType(typeCode = 110)
public class MessageId implements DataStructure, Comparable<MessageId> {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.MESSAGE_ID;
@OpenWireProperty(version = 10, sequence = 1)
protected String textView;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected ProducerId producerId;
@OpenWireProperty(version = 1, sequence = 3)
protected long producerSequenceId;
@OpenWireProperty(version = 1, sequence = 4)
protected long brokerSequenceId;
@OpenWireExtension
private transient String key;
@OpenWireExtension
private transient int hashCode;
public MessageId() {
this.producerId = new ProducerId();
}
public MessageId(ProducerInfo producerInfo, long producerSequenceId) {
this.producerId = producerInfo.getProducerId();
this.producerSequenceId = producerSequenceId;
}
public MessageId(String messageKey) {
setValue(messageKey);
}
public MessageId(String producerId, long producerSequenceId) {
this(new ProducerId(producerId), producerSequenceId);
}
public MessageId(ProducerId producerId, long producerSequenceId) {
this.producerId = producerId;
this.producerSequenceId = producerSequenceId;
}
/**
* Sets the value as a String
*/
public void setValue(String messageKey) {
key = messageKey;
// Parse off the sequenceId
int p = messageKey.lastIndexOf(":");
if (p >= 0) {
producerSequenceId = Long.parseLong(messageKey.substring(p + 1));
messageKey = messageKey.substring(0, p);
} else {
            throw new NumberFormatException("Invalid message key: " + messageKey);
}
producerId = new ProducerId(messageKey);
}
    /**
     * Sets the transient text view of the message ID. This value is ignored when the
     * message is marshaled onto a transport, so it only serves in-JVM changes that
     * accommodate foreign JMS message IDs.
     */
public void setTextView(String key) {
this.textView = key;
}
    /**
     * @openwire:property version=10
     * @return the transient text view of this message ID, or null if not set.
     */
public String getTextView() {
return textView;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || o.getClass() != getClass()) {
return false;
}
MessageId id = (MessageId) o;
return producerSequenceId == id.producerSequenceId && producerId.equals(id.producerId);
}
@Override
public int hashCode() {
if (hashCode == 0) {
if (textView != null) {
hashCode = textView.hashCode();
} else {
hashCode = producerId.hashCode() ^ (int) producerSequenceId;
}
}
return hashCode;
}
public String toProducerKey() {
if (textView == null) {
return toString();
} else {
return producerId.toString() + ":" + producerSequenceId;
}
}
@Override
public String toString() {
if (key == null) {
if (textView != null) {
if (textView.startsWith("ID:")) {
key = textView;
} else {
key = "ID:" + textView;
}
} else {
key = producerId.toString() + ":" + producerSequenceId;
}
}
return key;
}
/**
* @openwire:property version=1 cache=true
*/
public ProducerId getProducerId() {
return producerId;
}
public void setProducerId(ProducerId producerId) {
this.producerId = producerId;
}
/**
* @openwire:property version=1
*/
public long getProducerSequenceId() {
return producerSequenceId;
}
public void setProducerSequenceId(long producerSequenceId) {
this.producerSequenceId = producerSequenceId;
}
/**
* @openwire:property version=1
*/
public long getBrokerSequenceId() {
return brokerSequenceId;
}
public void setBrokerSequenceId(long brokerSequenceId) {
this.brokerSequenceId = brokerSequenceId;
}
@Override
public boolean isMarshallAware() {
return false;
}
public MessageId copy() {
MessageId copy = new MessageId(producerId, producerSequenceId);
copy.key = key;
copy.brokerSequenceId = brokerSequenceId;
copy.textView = textView;
return copy;
}
    /**
     * @param other the MessageId to compare against this one.
     * @return a negative integer, zero, or a positive integer per the Comparable contract.
     * @see java.lang.Comparable#compareTo(java.lang.Object)
     */
@Override
public int compareTo(MessageId other) {
int result = -1;
if (other != null) {
result = this.toString().compareTo(other.toString());
}
return result;
}
}
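A minimal usage sketch (not part of the source): the id string is hypothetical, and it assumes ProducerId round-trips its string form through its String constructor.

import org.apache.activemq.openwire.commands.MessageId;

public class MessageIdExample {
    public static void main(String[] args) {
        // A message key has the form "<producerId>:<producerSequenceId>".
        MessageId id = new MessageId("ID:host-1234-5678-1:1:1", 42);
        System.out.println(id); // ID:host-1234-5678-1:1:1:42

        // setValue() splits on the last ':' to recover the sequence id.
        MessageId parsed = new MessageId(id.toString());
        System.out.println(parsed.getProducerSequenceId()); // 42
        System.out.println(parsed.equals(id)); // true, if ProducerId defines value equality
    }
}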
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ConsumerId.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="122"
*/
@OpenWireType(typeCode = 122)
public class ConsumerId implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.CONSUMER_ID;
@OpenWireProperty(version = 1, sequence = 1)
protected String connectionId;
@OpenWireProperty(version = 1, sequence = 2)
protected long sessionId;
@OpenWireProperty(version = 1, sequence = 3)
protected long value;
@OpenWireExtension
protected transient int hashCode;
@OpenWireExtension
protected transient String key;
@OpenWireExtension
protected transient SessionId parentId;
public ConsumerId() {
}
    public ConsumerId(String str) {
        if (str != null) {
            String[] splits = str.split(":");
            if (splits.length >= 3) {
this.connectionId = splits[0];
this.sessionId = Long.parseLong(splits[1]);
this.value = Long.parseLong(splits[2]);
}
}
}
public ConsumerId(SessionId sessionId, long consumerId) {
this.connectionId = sessionId.getConnectionId();
this.sessionId = sessionId.getValue();
this.value = consumerId;
}
public ConsumerId(ConsumerId id) {
this.connectionId = id.getConnectionId();
this.sessionId = id.getSessionId();
this.value = id.getValue();
}
public SessionId getParentId() {
if (parentId == null) {
parentId = new SessionId(this);
}
return parentId;
}
@Override
public int hashCode() {
if (hashCode == 0) {
hashCode = connectionId.hashCode() ^ (int)sessionId ^ (int)value;
}
return hashCode;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || o.getClass() != ConsumerId.class) {
return false;
}
ConsumerId id = (ConsumerId)o;
return sessionId == id.sessionId && value == id.value && connectionId.equals(id.connectionId);
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String toString() {
if (key == null) {
key = connectionId + ":" + sessionId + ":" + value;
}
return key;
}
/**
* @openwire:property version=1
*/
public String getConnectionId() {
return connectionId;
}
public void setConnectionId(String connectionId) {
this.connectionId = connectionId;
}
/**
* @openwire:property version=1
*/
public long getSessionId() {
return sessionId;
}
public void setSessionId(long sessionId) {
this.sessionId = sessionId;
}
/**
* @openwire:property version=1
*/
public long getValue() {
return value;
}
public void setValue(long consumerId) {
this.value = consumerId;
}
@Override
public boolean isMarshallAware() {
return false;
}
}
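A minimal usage sketch (not part of the source; the id string is hypothetical):

import org.apache.activemq.openwire.commands.ConsumerId;

public class ConsumerIdExample {
    public static void main(String[] args) {
        // The string form is "<connectionId>:<sessionId>:<value>".
        ConsumerId id = new ConsumerId("conn-1:2:3");
        System.out.println(id);            // conn-1:2:3
        System.out.println(id.getValue()); // 3

        // The copy constructor produces an equal id with the same hash code.
        ConsumerId copy = new ConsumerId(id);
        System.out.println(copy.equals(id) && copy.hashCode() == id.hashCode()); // true
    }
}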
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireMapMessage.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectStreamException;
import java.io.OutputStream;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
import org.apache.activemq.openwire.buffer.UTF8Buffer;
import org.apache.activemq.openwire.codec.OpenWireFormat;
import org.apache.activemq.openwire.utils.IOExceptionSupport;
import org.apache.activemq.openwire.utils.OpenWireMarshallingSupport;
/**
 * @openwire:marshaller code="25"
*/
@OpenWireType(typeCode = 25)
public class OpenWireMapMessage extends OpenWireMessage {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_MAP_MESSAGE;
@OpenWireExtension
protected transient Map<String, Object> map = new HashMap<String, Object>();
private Object readResolve() throws ObjectStreamException {
if (this.map == null) {
this.map = new HashMap<String, Object>();
}
return this;
}
@Override
public OpenWireMapMessage copy() {
OpenWireMapMessage copy = new OpenWireMapMessage();
copy(copy);
return copy;
}
private void copy(OpenWireMapMessage copy) {
storeContent();
super.copy(copy);
}
// We only need to marshal the content if we are hitting the wire.
@Override
public void beforeMarshall(OpenWireFormat wireFormat) throws IOException {
super.beforeMarshall(wireFormat);
storeContent();
}
@Override
public void clearMarshalledState() throws IOException {
super.clearMarshalledState();
map.clear();
}
@Override
public void storeContentAndClear() {
storeContent();
map.clear();
}
@Override
public void storeContent() {
try {
if (getContent() == null && !map.isEmpty()) {
DataByteArrayOutputStream bytesOut = new DataByteArrayOutputStream();
OutputStream os = bytesOut;
if (isUseCompression()) {
compressed = true;
os = new DeflaterOutputStream(os);
}
DataOutputStream dataOut = new DataOutputStream(os);
OpenWireMarshallingSupport.marshalPrimitiveMap(map, dataOut);
dataOut.close();
setContent(bytesOut.toBuffer());
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
    /**
     * Rebuilds the map from the marshaled content body, if one is present.
     *
     * @throws IOException if the content cannot be unmarshaled.
     */
private void loadContent() throws IOException {
try {
if (getContent() != null && map.isEmpty()) {
Buffer content = getContent();
InputStream is = new DataByteArrayInputStream(content);
if (isCompressed()) {
is = new InflaterInputStream(is);
}
DataInputStream dataIn = new DataInputStream(is);
map = OpenWireMarshallingSupport.unmarshalPrimitiveMap(dataIn);
dataIn.close();
}
} catch (Exception e) {
throw IOExceptionSupport.create(e);
}
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String getMimeType() {
return "jms/map-message";
}
/**
* Clears out the message body. Clearing a message's body does not clear its
* header values or property entries.
* <P>
* If this message body was read-only, calling this method leaves the
* message body in the same state as an empty body in a newly created
* message.
*/
@Override
public void clearBody() throws IOException {
super.clearBody();
map.clear();
}
/**
* Returns the value of the object with the specified name.
* <P>
* This method can be used to return, in objectified format, an object in
* the Java programming language ("Java object") that had been stored in the
* Map with the equivalent <CODE>setObject</CODE> method call, or its
* equivalent primitive <CODE>set <I>type </I></CODE> method.
* <P>
* Note that byte values are returned as <CODE>byte[]</CODE>, not
* <CODE>Byte[]</CODE>.
*
* @param name
* the name of the Java object
*
* @return a copy of the Java object value with the specified name, in
* objectified format (for example, if the object was set as an
* <CODE>int</CODE>, an <CODE>Integer</CODE> is returned); if
* there is no item by this name, a null value is returned
     * @throws IOException if the message content cannot be read due to some
     *                 internal error.
*/
public Object getObject(String name) throws IOException {
initializeReading();
Object result = getContentMap().get(name);
if (result instanceof UTF8Buffer) {
result = result.toString();
}
return result;
}
/**
* Sets an object value with the specified name into the Map.
* <P>
* This method works only for the objectified primitive object types
* (<code>Integer</code>,<code>Double</code>,<code>Long</code> ...),
* <code>String</code> objects, and byte arrays.
*
* @param name
* the name of the Java object
* @param value
* the Java object value to set in the Map
     * @throws IOException if the provider fails to write the message due to
     *                 some internal error.
     * @throws IllegalArgumentException if the name is null or if the name is an
     *                 empty string.
     * @throws RuntimeException if the object is of an invalid type.
*/
public void setObject(String name, Object value) throws IOException {
initializeWriting();
if (value != null) {
// byte[] not allowed on properties
if (!(value instanceof byte[])) {
checkValidObject(value);
}
put(name, value);
} else {
put(name, null);
}
}
    /**
     * Removes the object value with the specified name from the Map.
     *
     * @param name
     *        the name of the Java object
     *
     * @throws IOException if the provider fails to write the message due to
     *                 some internal error.
     * @throws IllegalArgumentException if the name is null or if the name is an
     *                 empty string.
     */
public void removeObject(String name) throws IOException {
initializeWriting();
if (name == null || name.trim().isEmpty()) {
throw new IllegalArgumentException("map element name cannot be null or empty.");
}
this.map.remove(name);
}
/**
* Returns an <CODE>Enumeration</CODE> of all the names in the
* <CODE>MapMessage</CODE> object.
*
* @return an enumeration of all the names in this <CODE>MapMessage</CODE>
     * @throws IOException if the content map cannot be loaded.
*/
public Enumeration<String> getMapNames() throws IOException {
return Collections.enumeration(getContentMap().keySet());
}
/**
* Indicates whether an item exists in this <CODE>MapMessage</CODE>
* object.
*
* @param name the name of the item to test
* @return true if the item exists
     * @throws IOException if the provider fails to determine if the item
     *                 exists due to some internal error.
*/
public boolean itemExists(String name) throws IOException {
return getContentMap().containsKey(name);
}
private void initializeReading() throws IOException {
loadContent();
}
private void initializeWriting() throws IOException {
setContent(null);
}
@Override
public void compress() throws IOException {
storeContent();
super.compress();
}
@Override
public String toString() {
return super.toString() + " OpenWireMapMessage{ " + "theTable = " + map + " }";
}
protected Map<String, Object> getContentMap() throws IOException {
initializeReading();
return map;
}
protected void put(String name, Object value) throws IOException {
if (name == null) {
throw new IllegalArgumentException("The name of the property cannot be null.");
}
if (name.length() == 0) {
throw new IllegalArgumentException("The name of the property cannot be an emprty string.");
}
map.put(name, value);
}
}
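A usage sketch (not part of the source): it assumes checkValidObject() in the OpenWireMessage base class accepts boxed primitives and Strings.

import java.io.IOException;

import org.apache.activemq.openwire.commands.OpenWireMapMessage;

public class MapMessageExample {
    public static void main(String[] args) throws IOException {
        OpenWireMapMessage msg = new OpenWireMapMessage();
        msg.setObject("count", 5);        // objectified primitives, Strings and byte[] only
        msg.setObject("name", "example");

        // Marshals the map into the content body, as beforeMarshall() does on the wire path.
        msg.storeContent();

        System.out.println(msg.getObject("count")); // 5, served from the in-memory map
        System.out.println(msg.itemExists("name")); // true
    }
}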
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ProducerInfo.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="6"
*/
@OpenWireType(typeCode = 6)
public class ProducerInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.PRODUCER_INFO;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected ProducerId producerId;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected OpenWireDestination destination;
@OpenWireProperty(version = 1, sequence = 3, cached = true)
protected BrokerId[] brokerPath;
@OpenWireProperty(version = 2, sequence = 4)
protected boolean dispatchAsync;
@OpenWireProperty(version = 3, sequence = 5)
protected int windowSize;
public ProducerInfo() {
}
public ProducerInfo(ProducerId producerId) {
this.producerId = producerId;
}
public ProducerInfo(SessionInfo sessionInfo, long producerId) {
this.producerId = new ProducerId(sessionInfo.getSessionId(), producerId);
}
public ProducerInfo copy() {
ProducerInfo info = new ProducerInfo();
copy(info);
return info;
}
public void copy(ProducerInfo info) {
super.copy(info);
info.producerId = producerId;
info.destination = destination;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1 cache=true
*/
public ProducerId getProducerId() {
return producerId;
}
public void setProducerId(ProducerId producerId) {
this.producerId = producerId;
}
/**
* @openwire:property version=1 cache=true
*/
public OpenWireDestination getDestination() {
return destination;
}
public void setDestination(OpenWireDestination destination) {
this.destination = destination;
}
public RemoveInfo createRemoveCommand() {
RemoveInfo command = new RemoveInfo(getProducerId());
command.setResponseRequired(isResponseRequired());
return command;
}
/**
* The route of brokers the command has moved through.
*
* @openwire:property version=1 cache=true
*/
public BrokerId[] getBrokerPath() {
return brokerPath;
}
public void setBrokerPath(BrokerId[] brokerPath) {
this.brokerPath = brokerPath;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processAddProducer(this);
}
/**
     * Whether the broker should dispatch messages from this producer asynchronously.
     * Since synchronous dispatch can potentially block the producer thread, this can
     * be an important setting for the producer.
*
* @openwire:property version=2
*/
public boolean isDispatchAsync() {
return dispatchAsync;
}
public void setDispatchAsync(boolean dispatchAsync) {
this.dispatchAsync = dispatchAsync;
}
/**
* Used to configure the producer window size. A producer will send up to
* the configured window size worth of payload data to the broker before
     * waiting for an ack that allows it to send more.
*
* @openwire:property version=3
*/
public int getWindowSize() {
return windowSize;
}
public void setWindowSize(int windowSize) {
this.windowSize = windowSize;
}
@Override
public boolean isProducerInfo() {
return true;
}
}
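A usage sketch (not part of the source): it assumes ProducerId offers a String constructor that parses "connection:session:value", as MessageId.setValue() implies; the id string is hypothetical.

import org.apache.activemq.openwire.commands.ProducerId;
import org.apache.activemq.openwire.commands.ProducerInfo;
import org.apache.activemq.openwire.commands.RemoveInfo;

public class ProducerInfoExample {
    public static void main(String[] args) {
        ProducerInfo info = new ProducerInfo(new ProducerId("conn-1:1:1"));
        info.setDispatchAsync(true);   // broker dispatches this producer's messages async
        info.setWindowSize(64 * 1024); // payload bytes in flight before waiting for an ack
        info.setResponseRequired(true);

        // The remove command targets the same producer id and inherits the response flag.
        RemoveInfo remove = info.createRemoveCommand();
        System.out.println(remove.isResponseRequired()); // true
    }
}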
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/BaseCommand.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller
*/
@OpenWireType(typeCode = 0)
public abstract class BaseCommand implements Command {
@OpenWireProperty(version = 1, sequence = 1)
protected int commandId;
@OpenWireProperty(version = 1, sequence = 2)
protected boolean responseRequired;
public void copy(BaseCommand copy) {
copy.commandId = commandId;
copy.responseRequired = responseRequired;
}
/**
* @openwire:property version=1
*/
@Override
public int getCommandId() {
return commandId;
}
@Override
public void setCommandId(int commandId) {
this.commandId = commandId;
}
/**
* @openwire:property version=1
*/
@Override
public boolean isResponseRequired() {
return responseRequired;
}
@Override
public void setResponseRequired(boolean responseRequired) {
this.responseRequired = responseRequired;
}
@Override
public String toString() {
return getClass().getSimpleName();
}
@Override
public boolean isWireFormatInfo() {
return false;
}
@Override
public boolean isBrokerInfo() {
return false;
}
@Override
public boolean isResponse() {
return false;
}
@Override
public boolean isMessageDispatch() {
return false;
}
@Override
public boolean isMessage() {
return false;
}
@Override
public boolean isMarshallAware() {
return false;
}
@Override
public boolean isMessageAck() {
return false;
}
@Override
public boolean isMessageDispatchNotification() {
return false;
}
@Override
public boolean isShutdownInfo() {
return false;
}
@Override
public boolean isConnectionControl() {
return false;
}
@Override
public boolean isConnectionInfo() {
return false;
}
@Override
public boolean isSessionInfo() {
return false;
}
@Override
public boolean isProducerInfo() {
return false;
}
@Override
public boolean isConsumerInfo() {
return false;
}
}
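A sketch (not part of the source) of how the predicate methods support routing without instanceof checks; the producer id string is hypothetical.

import org.apache.activemq.openwire.commands.BaseCommand;
import org.apache.activemq.openwire.commands.ProducerId;
import org.apache.activemq.openwire.commands.ProducerInfo;

public class CommandRoutingExample {
    static void route(BaseCommand command) {
        // Each concrete command overrides exactly one predicate to return true.
        if (command.isProducerInfo()) {
            System.out.println("add producer: " + command);
        } else if (command.isConsumerInfo()) {
            System.out.println("add consumer: " + command);
        }
    }

    public static void main(String[] args) {
        route(new ProducerInfo(new ProducerId("conn-1:1:1"))); // add producer: ProducerInfo
    }
}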
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ConsumerInfo.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.util.ArrayList;
import java.util.List;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
import org.apache.activemq.openwire.annotations.OpenWireType;
/**
* @openwire:marshaller code="5"
*/
@OpenWireType(typeCode = 5)
public class ConsumerInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.CONSUMER_INFO;
public static final byte HIGH_PRIORITY = 10;
public static final byte NORMAL_PRIORITY = 0;
public static final byte NETWORK_CONSUMER_PRIORITY = -5;
public static final byte LOW_PRIORITY = -10;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected ConsumerId consumerId;
@OpenWireProperty(version = 1, sequence = 2)
protected boolean browser;
@OpenWireProperty(version = 1, sequence = 3, cached = true)
protected OpenWireDestination destination;
@OpenWireProperty(version = 1, sequence = 4)
protected int prefetchSize;
@OpenWireProperty(version = 1, sequence = 5)
protected int maximumPendingMessageLimit;
@OpenWireProperty(version = 1, sequence = 6)
protected boolean dispatchAsync;
@OpenWireProperty(version = 1, sequence = 7)
protected String selector;
@OpenWireProperty(version = 10, sequence = 8)
protected String clientId;
@OpenWireProperty(version = 1, sequence = 9)
protected String subscriptionName;
@OpenWireProperty(version = 1, sequence = 10)
protected boolean noLocal;
@OpenWireProperty(version = 1, sequence = 11)
protected boolean exclusive;
@OpenWireProperty(version = 1, sequence = 12)
protected boolean retroactive;
@OpenWireProperty(version = 1, sequence = 13)
protected byte priority;
@OpenWireProperty(version = 1, sequence = 14, cached = true)
protected BrokerId[] brokerPath;
@OpenWireProperty(version = 1, sequence = 15)
protected Object additionalPredicate;
@OpenWireProperty(version = 1, sequence = 16, serialized = false)
protected boolean networkSubscription;
@OpenWireProperty(version = 1, sequence = 17)
protected boolean optimizedAcknowledge;
@OpenWireProperty(version = 1, sequence = 18)
protected boolean noRangeAcks;
@OpenWireProperty(version = 4, sequence = 19, serialized = false)
protected transient ConsumerId[] networkConsumerPath;
@OpenWireExtension(serialized = false)
protected transient List<ConsumerId> networkConsumerIds;
public ConsumerInfo() {
}
public ConsumerInfo(ConsumerId consumerId) {
this.consumerId = consumerId;
}
public ConsumerInfo(SessionInfo sessionInfo, long consumerId) {
this.consumerId = new ConsumerId(sessionInfo.getSessionId(), consumerId);
}
public ConsumerInfo copy() {
ConsumerInfo info = new ConsumerInfo();
copy(info);
return info;
}
public void copy(ConsumerInfo info) {
super.copy(info);
info.consumerId = consumerId;
info.destination = destination;
info.prefetchSize = prefetchSize;
info.maximumPendingMessageLimit = maximumPendingMessageLimit;
info.browser = browser;
info.dispatchAsync = dispatchAsync;
info.selector = selector;
info.clientId = clientId;
info.subscriptionName = subscriptionName;
info.noLocal = noLocal;
info.exclusive = exclusive;
info.retroactive = retroactive;
info.priority = priority;
info.brokerPath = brokerPath;
info.networkSubscription = networkSubscription;
if (networkConsumerIds != null) {
if (info.networkConsumerIds == null) {
info.networkConsumerIds = new ArrayList<ConsumerId>();
}
info.networkConsumerIds.addAll(networkConsumerIds);
}
}
public boolean isDurable() {
return subscriptionName != null;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* Is used to uniquely identify the consumer to the broker.
*
* @openwire:property version=1 cache=true
*/
public ConsumerId getConsumerId() {
return consumerId;
}
public void setConsumerId(ConsumerId consumerId) {
this.consumerId = consumerId;
}
/**
* Is this consumer a queue browser?
*
* @openwire:property version=1
*/
public boolean isBrowser() {
return browser;
}
public void setBrowser(boolean browser) {
this.browser = browser;
}
/**
* The destination that the consumer is interested in receiving messages
* from. This destination could be a composite destination.
*
* @openwire:property version=1 cache=true
*/
public OpenWireDestination getDestination() {
return destination;
}
public void setDestination(OpenWireDestination destination) {
this.destination = destination;
}
/**
     * How many messages a broker will send to the client without receiving an
     * ack before it stops dispatching messages to the client.
*
* @openwire:property version=1
*/
public int getPrefetchSize() {
return prefetchSize;
}
public void setPrefetchSize(int prefetchSize) {
this.prefetchSize = prefetchSize;
}
/**
* How many messages a broker will keep around, above the prefetch limit,
* for non-durable topics before starting to discard older messages.
*
* @openwire:property version=1
*/
public int getMaximumPendingMessageLimit() {
return maximumPendingMessageLimit;
}
public void setMaximumPendingMessageLimit(int maximumPendingMessageLimit) {
this.maximumPendingMessageLimit = maximumPendingMessageLimit;
}
/**
     * Should the broker dispatch a message to the consumer asynchronously? With
     * async dispatch the broker uses a more SEDA style of processing, while
     * synchronous dispatch gives a straight-through (STP) style. STP is more
     * appropriate in high bandwidth situations or when used over the in-VM
     * transport.
*
* @openwire:property version=1
*/
public boolean isDispatchAsync() {
return dispatchAsync;
}
public void setDispatchAsync(boolean dispatchAsync) {
this.dispatchAsync = dispatchAsync;
}
/**
* The JMS selector used to filter out messages that this consumer is
* interested in.
*
* @openwire:property version=1
*/
public String getSelector() {
return selector;
}
public void setSelector(String selector) {
this.selector = selector;
}
/**
* Used to identify the id of a client connection.
*
* @openwire:property version=10
*/
public String getClientId() {
return clientId;
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
/**
* Used to identify the name of a durable subscription.
*
* @openwire:property version=1
*/
public String getSubscriptionName() {
return subscriptionName;
}
public void setSubscriptionName(String durableSubscriptionId) {
this.subscriptionName = durableSubscriptionId;
}
/**
* Set noLocal to true to avoid receiving messages that were published
* locally on the same connection.
*
* @openwire:property version=1
*/
public boolean isNoLocal() {
return noLocal;
}
public void setNoLocal(boolean noLocal) {
this.noLocal = noLocal;
}
/**
* An exclusive consumer locks out other consumers from being able to
* receive messages from the destination. If there are multiple exclusive
* consumers for a destination, the first one created will be the exclusive
* consumer of the destination.
*
* @openwire:property version=1
*/
public boolean isExclusive() {
return exclusive;
}
public void setExclusive(boolean exclusive) {
this.exclusive = exclusive;
}
/**
* A retroactive consumer only has meaning for Topics. It allows a consumer
* to retroactively see messages sent prior to the consumer being created.
* If the consumer is not durable, it will be delivered the last message
* published to the topic. If the consumer is durable then it will receive
* all persistent messages that are still stored in persistent storage for
* that topic.
*
* @openwire:property version=1
*/
public boolean isRetroactive() {
return retroactive;
}
public void setRetroactive(boolean retroactive) {
this.retroactive = retroactive;
}
public RemoveInfo createRemoveCommand() {
RemoveInfo command = new RemoveInfo(getConsumerId());
command.setResponseRequired(isResponseRequired());
return command;
}
/**
* The broker will avoid dispatching to a lower priority consumer if there
     * are other higher priority consumers available to dispatch to. This gives
     * the broker an affinity for higher priority consumers.
* Default priority is 0.
*
* @openwire:property version=1
*/
public byte getPriority() {
return priority;
}
public void setPriority(byte priority) {
this.priority = priority;
}
/**
* The route of brokers the command has moved through.
*
* @openwire:property version=1 cache=true
*/
public BrokerId[] getBrokerPath() {
return brokerPath;
}
public void setBrokerPath(BrokerId[] brokerPath) {
this.brokerPath = brokerPath;
}
/**
     * A transient additional predicate that can be used to inject additional
     * predicates into the selector on the fly. Handy if, say, a security
     * broker interceptor wants to filter out messages based on the security
     * level of the consumer.
*
* @openwire:property version=1
*/
public Object getAdditionalPredicate() {
return additionalPredicate;
}
public void setAdditionalPredicate(Object additionalPredicate) {
this.additionalPredicate = additionalPredicate;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processAddConsumer(this);
}
/**
* @openwire:property version=1
* @return Returns the networkSubscription.
*/
public boolean isNetworkSubscription() {
return networkSubscription;
}
/**
* @param networkSubscription The networkSubscription to set.
*/
public void setNetworkSubscription(boolean networkSubscription) {
this.networkSubscription = networkSubscription;
}
/**
* @openwire:property version=1
* @return Returns the optimizedAcknowledge.
*/
public boolean isOptimizedAcknowledge() {
return optimizedAcknowledge;
}
/**
* @param optimizedAcknowledge The optimizedAcknowledge to set.
*/
public void setOptimizedAcknowledge(boolean optimizedAcknowledge) {
this.optimizedAcknowledge = optimizedAcknowledge;
}
/**
     * The broker may be able to optimize its processing or provide better QoS
* if it knows the consumer will not be sending ranged acks.
*
* @return true if the consumer will not send range acks.
* @openwire:property version=1
*/
public boolean isNoRangeAcks() {
return noRangeAcks;
}
public void setNoRangeAcks(boolean noRangeAcks) {
this.noRangeAcks = noRangeAcks;
}
/**
* Tracks the original subscription id that causes a subscription to
* percolate through a network when networkTTL > 1. Tracking the original
* subscription allows duplicate suppression.
*
* @return array of the current subscription path
* @openwire:property version=4
*/
public ConsumerId[] getNetworkConsumerPath() {
ConsumerId[] result = null;
if (networkConsumerIds != null) {
result = networkConsumerIds.toArray(new ConsumerId[0]);
}
return result;
}
public void setNetworkConsumerPath(ConsumerId[] consumerPath) {
if (consumerPath != null) {
for (int i=0; i<consumerPath.length; i++) {
addNetworkConsumerId(consumerPath[i]);
}
}
}
@Override
public int hashCode() {
return (consumerId == null) ? 0 : consumerId.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
ConsumerInfo other = (ConsumerInfo) obj;
if (consumerId == null && other.consumerId != null) {
return false;
} else if (!consumerId.equals(other.consumerId)) {
return false;
}
return true;
}
public synchronized void addNetworkConsumerId(ConsumerId networkConsumerId) {
if (networkConsumerIds == null) {
networkConsumerIds = new ArrayList<ConsumerId>();
}
networkConsumerIds.add(networkConsumerId);
}
public synchronized void removeNetworkConsumerId(ConsumerId networkConsumerId) {
if (networkConsumerIds != null) {
networkConsumerIds.remove(networkConsumerId);
if (networkConsumerIds.isEmpty()) {
networkConsumerIds=null;
}
}
}
@Override
public boolean isConsumerInfo() {
return true;
}
}
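A usage sketch (not part of the source; the consumer id string and selector are hypothetical):

import org.apache.activemq.openwire.commands.ConsumerId;
import org.apache.activemq.openwire.commands.ConsumerInfo;

public class ConsumerInfoExample {
    public static void main(String[] args) {
        ConsumerInfo info = new ConsumerInfo(new ConsumerId("conn-1:2:3"));
        info.setPrefetchSize(1000);          // messages dispatched ahead of acks
        info.setSelector("JMSPriority > 4"); // broker-side message filtering
        info.setPriority(ConsumerInfo.HIGH_PRIORITY);

        // A non-null subscription name marks the consumer as durable.
        info.setSubscriptionName("audit-listener");
        System.out.println(info.isDurable()); // true
    }
}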
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ControlCommand.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* Used by the Broker to send a specific named command to the client.
*
* @openwire:marshaller code="14"
*/
@OpenWireType(typeCode = 14)
public class ControlCommand extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.CONTROL_COMMAND;
@OpenWireProperty(version = 1, sequence = 1)
private String command;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
public String getCommand() {
return command;
}
public void setCommand(String command) {
this.command = command;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processControlCommand(this);
}
}
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/WireFormatInfo.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
import org.apache.activemq.openwire.buffer.UTF8Buffer;
import org.apache.activemq.openwire.codec.OpenWireFormat;
import org.apache.activemq.openwire.utils.OpenWireMarshallingSupport;
/**
* @openwire:marshaller code="1"
*/
@OpenWireType(typeCode = 1, marshalAware = true)
public class WireFormatInfo implements Command, MarshallAware {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.WIREFORMAT_INFO;
private static final int MAX_PROPERTY_SIZE = 1024 * 4;
private static final byte MAGIC[] = new byte[] { 'A', 'c', 't', 'i', 'v', 'e', 'M', 'Q' };
@OpenWireProperty(version = 1, sequence = 1, size = 8)
protected byte magic[] = MAGIC;
@OpenWireProperty(version = 1, sequence = 2)
protected int version;
@OpenWireProperty(version = 1, sequence = 3)
protected Buffer marshalledProperties;
@OpenWireExtension
protected transient Map<String, Object> properties;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public boolean isWireFormatInfo() {
return true;
}
@Override
public boolean isMarshallAware() {
return true;
}
/**
* @openwire:property version=1 size=8 testSize=-1
*/
public byte[] getMagic() {
return magic;
}
public void setMagic(byte[] magic) {
this.magic = magic;
}
/**
* @openwire:property version=1
*/
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
/**
* @openwire:property version=1
*/
public Buffer getMarshalledProperties() {
return marshalledProperties;
}
public void setMarshalledProperties(Buffer marshalledProperties) {
this.marshalledProperties = marshalledProperties;
}
// ////////////////////
// Implementation Methods.
// ////////////////////
public Object getProperty(String name) throws IOException {
if (properties == null) {
if (marshalledProperties == null) {
return null;
}
            properties = unmarshallProperties(marshalledProperties);
}
return properties.get(name);
}
    public Map<String, Object> getProperties() throws IOException {
        if (properties == null) {
            if (marshalledProperties == null) {
                return Collections.emptyMap();
            }
            properties = unmarshallProperties(marshalledProperties);
}
return Collections.unmodifiableMap(properties);
}
public void clearProperties() {
marshalledProperties = null;
properties = null;
}
public void setProperty(String name, Object value) throws IOException {
lazyCreateProperties();
properties.put(name, value);
}
protected void lazyCreateProperties() throws IOException {
if (properties == null) {
if (marshalledProperties == null) {
properties = new HashMap<String, Object>();
} else {
                properties = unmarshallProperties(marshalledProperties);
marshalledProperties = null;
}
}
}
private Map<String, Object> unmarsallProperties(Buffer marshalledProperties) throws IOException {
return OpenWireMarshallingSupport.unmarshalPrimitiveMap(new DataInputStream(new DataByteArrayInputStream(marshalledProperties)), MAX_PROPERTY_SIZE);
}
@Override
public void beforeMarshall(OpenWireFormat wireFormat) throws IOException {
// Need to marshal the properties.
if (marshalledProperties == null && properties != null) {
DataByteArrayOutputStream baos = new DataByteArrayOutputStream();
DataOutputStream os = new DataOutputStream(baos);
OpenWireMarshallingSupport.marshalPrimitiveMap(properties, os);
os.close();
marshalledProperties = baos.toBuffer();
}
}
@Override
public void afterMarshall(OpenWireFormat wireFormat) throws IOException {
}
@Override
public void beforeUnmarshall(OpenWireFormat wireFormat) throws IOException {
}
@Override
public void afterUnmarshall(OpenWireFormat wireFormat) throws IOException {
}
public boolean isValid() {
return magic != null && Arrays.equals(magic, MAGIC);
}
@Override
public void setResponseRequired(boolean responseRequired) {
}
/**
* @throws IOException
*/
public boolean isCacheEnabled() throws IOException {
return Boolean.TRUE == getProperty("CacheEnabled");
}
public void setCacheEnabled(boolean cacheEnabled) throws IOException {
setProperty("CacheEnabled", cacheEnabled ? Boolean.TRUE : Boolean.FALSE);
}
/**
* @throws IOException
*/
public boolean isStackTraceEnabled() throws IOException {
return Boolean.TRUE == getProperty("StackTraceEnabled");
}
public void setStackTraceEnabled(boolean stackTraceEnabled) throws IOException {
setProperty("StackTraceEnabled", stackTraceEnabled ? Boolean.TRUE : Boolean.FALSE);
}
/**
* @throws IOException
*/
public boolean isTcpNoDelayEnabled() throws IOException {
return Boolean.TRUE == getProperty("TcpNoDelayEnabled");
}
public void setTcpNoDelayEnabled(boolean tcpNoDelayEnabled) throws IOException {
setProperty("TcpNoDelayEnabled", tcpNoDelayEnabled ? Boolean.TRUE : Boolean.FALSE);
}
/**
* @throws IOException
*/
public boolean isSizePrefixDisabled() throws IOException {
return Boolean.TRUE == getProperty("SizePrefixDisabled");
}
public void setSizePrefixDisabled(boolean prefixPacketSize) throws IOException {
setProperty("SizePrefixDisabled", prefixPacketSize ? Boolean.TRUE : Boolean.FALSE);
}
/**
* @throws IOException
*/
public boolean isTightEncodingEnabled() throws IOException {
return Boolean.TRUE == getProperty("TightEncodingEnabled");
}
public void setTightEncodingEnabled(boolean tightEncodingEnabled) throws IOException {
setProperty("TightEncodingEnabled", tightEncodingEnabled ? Boolean.TRUE : Boolean.FALSE);
}
public String getHost() throws IOException {
UTF8Buffer buff = (UTF8Buffer) getProperty("Host");
if (buff == null) {
return null;
}
return buff.toString();
}
public void setHost(String hostname) throws IOException {
setProperty("Host", hostname);
}
/**
* @throws IOException
*/
public long getMaxInactivityDuration() throws IOException {
Long l = (Long) getProperty("MaxInactivityDuration");
return l == null ? 0 : l.longValue();
}
public void setMaxInactivityDuration(long maxInactivityDuration) throws IOException {
setProperty("MaxInactivityDuration", new Long(maxInactivityDuration));
}
public long getMaxInactivityDurationInitalDelay() throws IOException {
Long l = (Long) getProperty("MaxInactivityDurationInitalDelay");
return l == null ? 0 : l.longValue();
}
public void setMaxInactivityDurationInitalDelay(long maxInactivityDurationInitalDelay) throws IOException {
setProperty("MaxInactivityDurationInitalDelay", new Long(maxInactivityDurationInitalDelay));
}
public long getMaxFrameSize() throws IOException {
Long l = (Long) getProperty("MaxFrameSize");
return l == null ? 0 : l.longValue();
}
public void setMaxFrameSize(long maxFrameSize) throws IOException {
setProperty("MaxFrameSize", new Long(maxFrameSize));
}
/**
* @throws IOException
*/
public int getCacheSize() throws IOException {
Integer i = (Integer) getProperty("CacheSize");
return i == null ? 0 : i.intValue();
}
public void setCacheSize(int cacheSize) throws IOException {
setProperty("CacheSize", new Integer(cacheSize));
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processWireFormat(this);
}
@Override
public String toString() {
Map<String, Object> p = null;
try {
p = getProperties();
} catch (IOException ignore) {
}
return "WireFormatInfo { version=" + version + ", properties=" + p + ", magic=" + toString(magic) + "}";
}
private String toString(byte[] data) {
StringBuffer sb = new StringBuffer();
sb.append('[');
for (int i = 0; i < data.length; i++) {
if (i != 0) {
sb.append(',');
}
sb.append((char) data[i]);
}
sb.append(']');
return sb.toString();
}
// /////////////////////////////////////////////////////////////
//
    // These are not implemented.
//
// /////////////////////////////////////////////////////////////
@Override
public void setCommandId(int value) {
}
@Override
public int getCommandId() {
return 0;
}
@Override
public boolean isResponseRequired() {
return false;
}
@Override
public boolean isResponse() {
return false;
}
@Override
public boolean isBrokerInfo() {
return false;
}
@Override
public boolean isMessageDispatch() {
return false;
}
@Override
public boolean isMessage() {
return false;
}
@Override
public boolean isMessageAck() {
return false;
}
@Override
public boolean isMessageDispatchNotification() {
return false;
}
@Override
public boolean isShutdownInfo() {
return false;
}
@Override
public boolean isConnectionControl() {
return false;
}
@Override
public boolean isConnectionInfo() {
return false;
}
@Override
public boolean isSessionInfo() {
return false;
}
@Override
public boolean isProducerInfo() {
return false;
}
@Override
public boolean isConsumerInfo() {
return false;
}
}
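A sketch of the negotiation properties (not part of the source); beforeMarshall() is passed null here only because this implementation ignores its argument.

import java.io.IOException;

import org.apache.activemq.openwire.commands.WireFormatInfo;

public class WireFormatInfoExample {
    public static void main(String[] args) throws IOException {
        WireFormatInfo info = new WireFormatInfo();
        info.setVersion(10);
        info.setCacheEnabled(true);           // stored in the lazily created property map
        info.setMaxInactivityDuration(30000);

        // Serializes the property map into marshalledProperties for the wire.
        info.beforeMarshall(null);
        System.out.println(info.getMarshalledProperties() != null); // true
        System.out.println(info.isValid()); // true: the magic bytes spell "ActiveMQ"
    }
}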
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ConnectionError.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="16"
*/
@OpenWireType(typeCode = 16)
public class ConnectionError extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.CONNECTION_ERROR;
@OpenWireProperty(version = 1, sequence = 1)
private Throwable exception;
@OpenWireProperty(version = 1, sequence = 2)
private ConnectionId connectionId;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processConnectionError(this);
}
/**
* @openwire:property version=1
*/
public Throwable getException() {
return exception;
}
public void setException(Throwable exception) {
this.exception = exception;
}
/**
* @openwire:property version=1
*/
public ConnectionId getConnectionId() {
return connectionId;
}
public void setConnectionId(ConnectionId connectionId) {
this.connectionId = connectionId;
}
}
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/XATransactionId.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import java.util.Arrays;
import javax.transaction.xa.Xid;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
/**
* @openwire:marshaller code="112"
*/
@OpenWireType(typeCode = 112)
public class XATransactionId extends TransactionId implements Xid, Comparable<XATransactionId> {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_XA_TRANSACTION_ID;
@OpenWireProperty(version = 1, sequence = 1)
private int formatId;
@OpenWireProperty(version = 1, sequence = 2)
private byte[] globalTransactionId;
@OpenWireProperty(version = 1, sequence = 3)
private byte[] branchQualifier;
@OpenWireExtension
private transient DataByteArrayOutputStream outputStream;
@OpenWireExtension
private transient byte[] encodedXidBytes;
@OpenWireExtension
private transient int hash;
@OpenWireExtension
private transient String transactionKey;
public XATransactionId() {
}
public XATransactionId(Xid xid) {
this.formatId = xid.getFormatId();
this.globalTransactionId = xid.getGlobalTransactionId();
this.branchQualifier = xid.getBranchQualifier();
}
public XATransactionId(byte[] encodedBytes) throws IOException {
encodedXidBytes = encodedBytes;
initFromEncodedBytes();
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
    private static final int XID_PREFIX_SIZE = 16;
//+|-,(long)lastAck,(byte)priority,(int)formatid,(short)globalLength....
private void initFromEncodedBytes() throws IOException {
DataByteArrayInputStream inputStream = new DataByteArrayInputStream(encodedXidBytes);
inputStream.skipBytes(10);
formatId = inputStream.readInt();
int globalLength = inputStream.readShort();
globalTransactionId = new byte[globalLength];
try {
inputStream.read(globalTransactionId);
branchQualifier = new byte[inputStream.available()];
inputStream.read(branchQualifier);
} catch (IOException fatal) {
throw new RuntimeException(this + ", failed to decode:", fatal);
} finally {
inputStream.close();
}
}
public synchronized byte[] getEncodedXidBytes() {
if (encodedXidBytes == null) {
outputStream = new DataByteArrayOutputStream(XID_PREFIX_SIZE + globalTransactionId.length + branchQualifier.length);
try {
outputStream.position(10);
outputStream.writeInt(formatId);
outputStream.writeShort(globalTransactionId.length);
} catch (IOException fatal) {
throw new RuntimeException(this + ", failed to encode:", fatal);
}
try {
outputStream.write(globalTransactionId);
outputStream.write(branchQualifier);
} catch (IOException fatal) {
throw new RuntimeException(this + ", failed to encode:", fatal);
}
encodedXidBytes = outputStream.getData();
}
return encodedXidBytes;
}
public DataByteArrayOutputStream internalOutputStream() {
return outputStream;
}
@Override
public synchronized String getTransactionKey() {
if (transactionKey == null) {
StringBuffer s = new StringBuffer();
s.append("XID:[" + formatId + ",globalId=");
s.append(stringForm(formatId, globalTransactionId));
s.append(",branchId=");
s.append(stringForm(formatId, branchQualifier));
s.append("]");
transactionKey = s.toString();
}
return transactionKey;
}
private String stringForm(int format, byte[] uid) {
StringBuffer s = new StringBuffer();
switch (format) {
case 131077: // arjuna
stringFormArj(s, uid);
break;
default: // aries
stringFormDefault(s, uid);
}
return s.toString();
}
private void stringFormDefault(StringBuffer s, byte[] uid) {
for (int i = 0; i < uid.length; i++) {
s.append(Integer.toHexString(uid[i]));
}
}
private void stringFormArj(StringBuffer s, byte[] uid) {
DataByteArrayInputStream byteArrayInputStream = null;
try {
byteArrayInputStream = new DataByteArrayInputStream(uid);
s.append(Long.toString(byteArrayInputStream.readLong(), 16));
s.append(':');
s.append(Long.toString(byteArrayInputStream.readLong(), 16));
s.append(':');
s.append(Integer.toString(byteArrayInputStream.readInt(), 16));
s.append(':');
s.append(Integer.toString(byteArrayInputStream.readInt(), 16));
s.append(':');
s.append(Integer.toString(byteArrayInputStream.readInt(), 16));
} catch (Exception ignored) {
stringFormDefault(s, uid);
} finally {
            try {
                if (byteArrayInputStream != null) {
                    byteArrayInputStream.close();
                }
            } catch (IOException e) {
            }
}
}
@Override
public String toString() {
return getTransactionKey();
}
@Override
public boolean isXATransaction() {
return true;
}
@Override
public boolean isLocalTransaction() {
return false;
}
/**
* @openwire:property version=1
*/
@Override
public int getFormatId() {
return formatId;
}
/**
* @openwire:property version=1
*/
@Override
public byte[] getGlobalTransactionId() {
return globalTransactionId;
}
/**
* @openwire:property version=1
*/
@Override
public byte[] getBranchQualifier() {
return branchQualifier;
}
public void setBranchQualifier(byte[] branchQualifier) {
this.branchQualifier = branchQualifier;
this.hash = 0;
}
public void setFormatId(int formatId) {
this.formatId = formatId;
this.hash = 0;
}
public void setGlobalTransactionId(byte[] globalTransactionId) {
this.globalTransactionId = globalTransactionId;
this.hash = 0;
}
@Override
public int hashCode() {
if (hash == 0) {
hash = formatId;
hash = hash(globalTransactionId, hash);
hash = hash(branchQualifier, hash);
if (hash == 0) {
hash = 0xaceace;
}
}
return hash;
}
private static int hash(byte[] bytes, int hash) {
int size = bytes.length;
for (int i = 0; i < size; i++) {
hash ^= bytes[i] << ((i % 4) * 8);
}
return hash;
}
@Override
public boolean equals(Object o) {
if (o == null || o.getClass() != XATransactionId.class) {
return false;
}
XATransactionId xid = (XATransactionId)o;
return xid.formatId == formatId &&
Arrays.equals(xid.globalTransactionId, globalTransactionId) &&
Arrays.equals(xid.branchQualifier, branchQualifier);
}
@Override
public int compareTo(XATransactionId xid) {
if (xid == null) {
return -1;
}
return getTransactionKey().compareTo(xid.getTransactionKey());
}
}
| 1,509 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/JournalTrace.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="53"
*/
@OpenWireType(typeCode = 53)
public class JournalTrace implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.JOURNAL_TRACE;
@OpenWireProperty(version = 1, sequence = 1)
private String message;
public JournalTrace() {
}
public JournalTrace(String message) {
this.message = message;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
public String getMessage() {
return message;
}
/**
* @openwire:property version=1
*/
public void setMessage(String message) {
this.message = message;
}
@Override
public boolean isMarshallAware() {
return false;
}
@Override
public String toString() {
return getClass().getSimpleName() + " { " + message + " }";
}
}
| 1,510 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/BrokerInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
 * When a client connects to a broker, the broker sends the client a BrokerInfo
 * so that the client knows which broker node it is talking to and which peers
 * that node has in its cluster. This is the broker helping the client
 * discover other nodes in the cluster.
*
* @openwire:marshaller code="2"
*/
@OpenWireType(typeCode = 2)
public class BrokerInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.BROKER_INFO;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected BrokerId brokerId;
@OpenWireProperty(version = 1, sequence = 2)
protected String brokerURL;
@OpenWireProperty(version = 1, sequence = 3)
protected BrokerInfo peerBrokerInfos[];
@OpenWireProperty(version = 1, sequence = 4)
protected String brokerName;
@OpenWireProperty(version = 1, sequence = 5)
protected boolean slaveBroker;
@OpenWireProperty(version = 1, sequence = 6)
protected boolean masterBroker;
@OpenWireProperty(version = 1, sequence = 7)
protected boolean faultTolerantConfiguration;
@OpenWireProperty(version = 2, sequence = 8)
protected boolean duplexConnection;
@OpenWireProperty(version = 2, sequence = 9)
protected boolean networkConnection;
@OpenWireProperty(version = 2, sequence = 10)
protected long connectionId;
@OpenWireProperty(version = 3, sequence = 11)
protected String brokerUploadUrl;
@OpenWireProperty(version = 3, sequence = 12)
protected String networkProperties;
public BrokerInfo copy() {
BrokerInfo copy = new BrokerInfo();
copy(copy);
return copy;
}
private void copy(BrokerInfo copy) {
super.copy(copy);
copy.brokerId = this.brokerId;
copy.brokerURL = this.brokerURL;
copy.slaveBroker = this.slaveBroker;
copy.masterBroker = this.masterBroker;
copy.faultTolerantConfiguration = this.faultTolerantConfiguration;
copy.networkConnection = this.networkConnection;
copy.duplexConnection = this.duplexConnection;
copy.peerBrokerInfos = this.peerBrokerInfos;
copy.brokerName = this.brokerName;
copy.connectionId = this.connectionId;
copy.brokerUploadUrl = this.brokerUploadUrl;
copy.networkProperties = this.networkProperties;
}
@Override
public boolean isBrokerInfo() {
return true;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1 cache=true
*/
public BrokerId getBrokerId() {
return brokerId;
}
public void setBrokerId(BrokerId brokerId) {
this.brokerId = brokerId;
}
/**
* @openwire:property version=1
*/
public String getBrokerURL() {
return brokerURL;
}
public void setBrokerURL(String brokerURL) {
this.brokerURL = brokerURL;
}
/**
* @openwire:property version=1 testSize=0
*/
public BrokerInfo[] getPeerBrokerInfos() {
return peerBrokerInfos;
}
public void setPeerBrokerInfos(BrokerInfo[] peerBrokerInfos) {
this.peerBrokerInfos = peerBrokerInfos;
}
/**
* @openwire:property version=1
*/
public String getBrokerName() {
return brokerName;
}
public void setBrokerName(String brokerName) {
this.brokerName = brokerName;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processBrokerInfo(this);
}
/**
* @openwire:property version=1
*/
public boolean isSlaveBroker() {
return slaveBroker;
}
public void setSlaveBroker(boolean slaveBroker) {
this.slaveBroker = slaveBroker;
}
/**
* @openwire:property version=1
*/
public boolean isMasterBroker() {
return masterBroker;
}
/**
* @param masterBroker The masterBroker to set.
*/
public void setMasterBroker(boolean masterBroker) {
this.masterBroker = masterBroker;
}
/**
* @openwire:property version=1
* @return Returns the faultTolerantConfiguration.
*/
public boolean isFaultTolerantConfiguration() {
return faultTolerantConfiguration;
}
/**
* @param faultTolerantConfiguration The faultTolerantConfiguration to set.
*/
public void setFaultTolerantConfiguration(boolean faultTolerantConfiguration) {
this.faultTolerantConfiguration = faultTolerantConfiguration;
}
/**
* @openwire:property version=2
* @return the duplexConnection
*/
public boolean isDuplexConnection() {
return this.duplexConnection;
}
/**
* @param duplexConnection the duplexConnection to set
*/
public void setDuplexConnection(boolean duplexConnection) {
this.duplexConnection = duplexConnection;
}
/**
* @openwire:property version=2
* @return the networkConnection
*/
public boolean isNetworkConnection() {
return this.networkConnection;
}
/**
* @param networkConnection the networkConnection to set
*/
public void setNetworkConnection(boolean networkConnection) {
this.networkConnection = networkConnection;
}
/**
 * The broker assigns each connection it accepts a connection id.
*
* @openwire:property version=2
*/
public long getConnectionId() {
return connectionId;
}
public void setConnectionId(long connectionId) {
this.connectionId = connectionId;
}
/**
* The URL to use when uploading BLOBs to the broker or some other external
* file/http server
*
* @openwire:property version=3
*/
public String getBrokerUploadUrl() {
return brokerUploadUrl;
}
public void setBrokerUploadUrl(String brokerUploadUrl) {
this.brokerUploadUrl = brokerUploadUrl;
}
/**
* @openwire:property version=3 cache=false
* @return the networkProperties
*/
public String getNetworkProperties() {
return this.networkProperties;
}
/**
* @param networkProperties the networkProperties to set
*/
public void setNetworkProperties(String networkProperties) {
this.networkProperties = networkProperties;
}
}
| 1,511 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ConnectionControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
 * Used by the Broker to control the connection state; the client should react to
 * this command immediately.
*
* @openwire:marshaller code="18"
*/
@OpenWireType(typeCode = 18)
public class ConnectionControl extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.CONNECTION_CONTROL;
@OpenWireProperty(version = 1, sequence = 1)
protected boolean close;
@OpenWireProperty(version = 1, sequence = 2)
protected boolean exit;
@OpenWireProperty(version = 1, sequence = 3)
protected boolean faultTolerant;
@OpenWireProperty(version = 1, sequence = 4)
protected boolean resume;
@OpenWireProperty(version = 1, sequence = 5)
protected boolean suspend;
@OpenWireProperty(version = 6, sequence = 6)
protected String connectedBrokers = "";
@OpenWireProperty(version = 6, sequence = 7)
protected String reconnectTo = "";
@OpenWireProperty(version = 6, sequence = 8)
protected boolean rebalanceConnection;
@OpenWireProperty(version = 6, sequence = 9)
protected byte[] token;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processConnectionControl(this);
}
@Override
public boolean isConnectionControl() {
return true;
}
/**
* @openwire:property version=1
* @return Returns the close.
*/
public boolean isClose() {
return close;
}
/**
* @param close The close to set.
*/
public void setClose(boolean close) {
this.close = close;
}
/**
* @openwire:property version=1
* @return Returns the exit.
*/
public boolean isExit() {
return exit;
}
/**
* @param exit The exit to set.
*/
public void setExit(boolean exit) {
this.exit = exit;
}
/**
* @openwire:property version=1
* @return Returns the faultTolerant.
*/
public boolean isFaultTolerant() {
return faultTolerant;
}
/**
* @param faultTolerant The faultTolerant to set.
*/
public void setFaultTolerant(boolean faultTolerant) {
this.faultTolerant = faultTolerant;
}
/**
* @openwire:property version=1
* @return Returns the resume.
*/
public boolean isResume() {
return resume;
}
/**
* @param resume The resume to set.
*/
public void setResume(boolean resume) {
this.resume = resume;
}
/**
* @openwire:property version=1
* @return Returns the suspend.
*/
public boolean isSuspend() {
return suspend;
}
/**
* @param suspend The suspend to set.
*/
public void setSuspend(boolean suspend) {
this.suspend = suspend;
}
/**
* @openwire:property version=6 cache=false
* @return connected brokers.
*/
public String getConnectedBrokers() {
return this.connectedBrokers;
}
/**
* @param connectedBrokers the connectedBrokers to set
*/
public void setConnectedBrokers(String connectedBrokers) {
this.connectedBrokers = connectedBrokers;
}
/**
* @openwire:property version=6 cache=false
* @return the reconnectTo
*/
public String getReconnectTo() {
return this.reconnectTo;
}
/**
* @param reconnectTo the reconnectTo to set
*/
public void setReconnectTo(String reconnectTo) {
this.reconnectTo = reconnectTo;
}
/**
* @return the rebalanceConnection
* @openwire:property version=6 cache=false
*/
public boolean isRebalanceConnection() {
return this.rebalanceConnection;
}
/**
* @param rebalanceConnection the rebalanceConnection to set
*/
public void setRebalanceConnection(boolean rebalanceConnection) {
this.rebalanceConnection = rebalanceConnection;
}
/**
* @openwire:property version=8
* @return the token
*/
public byte[] getToken() {
return this.token;
}
/**
* @param token the token to set
*/
public void setToken(byte[] token) {
this.token = token;
}
}
| 1,512 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/IntegerResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="34"
*/
@OpenWireType(typeCode = 34)
public class IntegerResponse extends Response {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.INTEGER_RESPONSE;
@OpenWireProperty(version = 1, sequence = 1)
int result;
public IntegerResponse() {
}
public IntegerResponse(int result) {
this.result = result;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
public int getResult() {
return result;
}
public void setResult(int result) {
this.result = result;
}
}
| 1,513 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/TransactionInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="7"
*/
@OpenWireType(typeCode = 7)
public class TransactionInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.TRANSACTION_INFO;
public static final byte BEGIN = 0;
public static final byte PREPARE = 1;
public static final byte COMMIT_ONE_PHASE = 2;
public static final byte COMMIT_TWO_PHASE = 3;
public static final byte ROLLBACK = 4;
public static final byte RECOVER = 5;
public static final byte FORGET = 6;
public static final byte END = 7;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected ConnectionId connectionId;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected TransactionId transactionId;
@OpenWireProperty(version = 1, sequence = 3)
protected byte type;
public TransactionInfo() {
}
public TransactionInfo(ConnectionId connectionId, TransactionId transactionId, byte type) {
this.connectionId = connectionId;
this.transactionId = transactionId;
this.type = type;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1 cache=true
*/
public ConnectionId getConnectionId() {
return connectionId;
}
public void setConnectionId(ConnectionId connectionId) {
this.connectionId = connectionId;
}
/**
* @openwire:property version=1 cache=true
*/
public TransactionId getTransactionId() {
return transactionId;
}
public void setTransactionId(TransactionId transactionId) {
this.transactionId = transactionId;
}
/**
* @openwire:property version=1
*/
public byte getType() {
return type;
}
public void setType(byte type) {
this.type = type;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
switch (type) {
case TransactionInfo.BEGIN:
return visitor.processBeginTransaction(this);
case TransactionInfo.END:
return visitor.processEndTransaction(this);
case TransactionInfo.PREPARE:
return visitor.processPrepareTransaction(this);
case TransactionInfo.COMMIT_ONE_PHASE:
return visitor.processCommitTransactionOnePhase(this);
case TransactionInfo.COMMIT_TWO_PHASE:
return visitor.processCommitTransactionTwoPhase(this);
case TransactionInfo.ROLLBACK:
return visitor.processRollbackTransaction(this);
case TransactionInfo.RECOVER:
return visitor.processRecoverTransactions(this);
case TransactionInfo.FORGET:
return visitor.processForgetTransaction(this);
default:
throw new IOException("Transaction info type unknown: " + type);
}
}
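/*
 * Illustrative sketch (not part of the original class): driving the visitor
 * dispatch above. The connection id, transaction id and visitor instance are
 * hypothetical.
 *
 *   TransactionInfo begin = new TransactionInfo(connectionId, txId, TransactionInfo.BEGIN);
 *   Response response = begin.visit(visitor); // routes to visitor.processBeginTransaction(begin)
 */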
}
| 1,514 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireStreamMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
import org.apache.activemq.openwire.utils.IOExceptionSupport;
import org.apache.activemq.openwire.utils.OpenWireMarshallingSupport;
@OpenWireType(typeCode = 27)
public class OpenWireStreamMessage extends OpenWireMessage {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_STREAM_MESSAGE;
@Override
public OpenWireStreamMessage copy() {
OpenWireStreamMessage copy = new OpenWireStreamMessage();
copy(copy);
return copy;
}
private void copy(OpenWireStreamMessage copy) {
storeContent();
super.copy(copy);
}
@Override
public void onSend() throws IOException {
super.onSend();
storeContent();
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String getMimeType() {
return "jms/stream-message";
}
/**
 * Reads the contents of this StreamMessage instance into a single List<Object>
 * and returns it. The read starts from the current position of the message, which implies
 * that the list might not be a complete view of the message if any prior read operations
 * were invoked.
 *
 * @return a List containing the objects stored in this message starting from the current position.
 *
 * @throws IOException if an error occurs while reading the message.
*/
public List<Object> readStreamToList() throws IOException {
if (!hasContent()) {
return Collections.emptyList();
}
Buffer payload = getPayload();
DataByteArrayInputStream dataIn = new DataByteArrayInputStream(payload);
List<Object> result = new ArrayList<Object>();
while (true) {
try {
result.add(readNextElement(dataIn));
} catch (EOFException ex) {
break;
} catch (Exception e) {
throw IOExceptionSupport.create(e);
}
}
return result;
}
/**
* Given a DataInput instance, attempt to read OpenWireStreamMessage formatted values
 * and return the next element.
*
* @param input
* the input stream that contains the marshaled bytes.
*
* @return the next element encoded in the stream.
*
* @throws IOException if an error occurs while reading the next element from the stream
 * @throws EOFException if the end of the stream has been reached.
*/
protected Object readNextElement(DataInput input) throws IOException {
int type = input.readByte();
if (type == -1) {
throw new EOFException("Reached end of stream.");
}
if (type == OpenWireMarshallingSupport.NULL) {
return null;
} else if (type == OpenWireMarshallingSupport.BIG_STRING_TYPE) {
return OpenWireMarshallingSupport.readUTF8(input);
} else if (type == OpenWireMarshallingSupport.STRING_TYPE) {
return input.readUTF();
} else if (type == OpenWireMarshallingSupport.LONG_TYPE) {
return Long.valueOf(input.readLong());
} else if (type == OpenWireMarshallingSupport.INTEGER_TYPE) {
return Integer.valueOf(input.readInt());
} else if (type == OpenWireMarshallingSupport.SHORT_TYPE) {
return Short.valueOf(input.readShort());
} else if (type == OpenWireMarshallingSupport.BYTE_TYPE) {
return Byte.valueOf(input.readByte());
} else if (type == OpenWireMarshallingSupport.FLOAT_TYPE) {
return Float.valueOf(input.readFloat());
} else if (type == OpenWireMarshallingSupport.DOUBLE_TYPE) {
return Double.valueOf(input.readDouble());
} else if (type == OpenWireMarshallingSupport.BOOLEAN_TYPE) {
return input.readBoolean() ? Boolean.TRUE : Boolean.FALSE;
} else if (type == OpenWireMarshallingSupport.CHAR_TYPE) {
return Character.valueOf(input.readChar());
} else if (type == OpenWireMarshallingSupport.BYTE_ARRAY_TYPE) {
int len = input.readInt();
byte[] value = new byte[len];
input.readFully(value);
return value;
} else {
throw new IOException("unknown type read from encoded stream.");
}
}
/**
 * Writes the given list of Objects to the message's stream. The elements in the list
 * must adhere to the supported types of a JMS StreamMessage or an exception will be
 * thrown.
 *
 * @param elements
 * the list of elements to store into the message.
*
* @throws IOException if an error occurs while writing the elements to the message.
*/
public void writeListToStream(List<Object> elements) throws IOException {
if (elements != null && !elements.isEmpty()) {
DataByteArrayOutputStream output = new DataByteArrayOutputStream();
for (Object value : elements) {
writeElement(value, output);
}
output.close();
setPayload(output.toBuffer());
}
}
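/*
 * Illustrative sketch (not part of the original class): a write/read round
 * trip through the stream body using the two methods above.
 *
 *   OpenWireStreamMessage msg = new OpenWireStreamMessage();
 *   msg.writeListToStream(java.util.Arrays.<Object>asList("text", 42, true));
 *   List<Object> restored = msg.readStreamToList(); // ["text", 42, true]
 */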
/**
* Encodes the given object into the OpenWire marshaled form and writes it to the
* given DataOutput instance. Each element is written with a type identifier to
* allow for easy unmarshaling.
*
* @param value
* @param output
* @throws IOException
*/
protected void writeElement(Object value, DataOutput output) throws IOException {
if (value == null) {
OpenWireMarshallingSupport.marshalNull(output);
} else if (value instanceof String) {
OpenWireMarshallingSupport.marshalString(output, (String) value);
} else if (value instanceof Character) {
OpenWireMarshallingSupport.marshalChar(output, (Character) value);
} else if (value instanceof Boolean) {
OpenWireMarshallingSupport.marshalBoolean(output, (Boolean) value);
} else if (value instanceof Byte) {
OpenWireMarshallingSupport.marshalByte(output, (Byte) value);
} else if (value instanceof Short) {
OpenWireMarshallingSupport.marshalShort(output, (Short) value);
} else if (value instanceof Integer) {
OpenWireMarshallingSupport.marshalInt(output, (Integer) value);
} else if (value instanceof Float) {
OpenWireMarshallingSupport.marshalFloat(output, (Float) value);
} else if (value instanceof Double) {
OpenWireMarshallingSupport.marshalDouble(output, (Double) value);
} else if (value instanceof byte[]) {
OpenWireMarshallingSupport.marshalByteArray(output, (byte[]) value, 0, ((byte[]) value).length);
} else if (value instanceof Long) {
OpenWireMarshallingSupport.marshalLong(output, (Long) value);
} else {
throw new IOException("Unsupported Object type: " + value.getClass());
}
}
@Override
public void compress() throws IOException {
storeContent();
super.compress();
}
@Override
public String toString() {
return super.toString() + " OpenWireStreamMessage{}";
}
}
| 1,515 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/SubscriptionInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* Used to represent a durable subscription.
*
* @openwire:marshaller code="55"
*/
@OpenWireType(typeCode = 55)
public class SubscriptionInfo implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.DURABLE_SUBSCRIPTION_INFO;
@OpenWireProperty(version = 1, sequence = 1)
protected String clientId;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected OpenWireDestination destination;
@OpenWireProperty(version = 1, sequence = 3)
protected String selector;
@OpenWireProperty(version = 1, sequence = 4)
protected String subscriptionName;
@OpenWireProperty(version = 3, sequence = 5)
protected OpenWireDestination subscribedDestination;
@OpenWireProperty(version = 11, sequence = 6)
protected boolean noLocal;
public SubscriptionInfo() {
}
public SubscriptionInfo(String clientId, String subscriptionName) {
this.clientId = clientId;
this.subscriptionName = subscriptionName;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
public String getClientId() {
return clientId;
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
/**
 * This is the resolved destination that the subscription is receiving messages from. This
* will never be a pattern or a composite destination.
*
* @openwire:property version=1 cache=true
*/
public OpenWireDestination getDestination() {
return destination;
}
public void setDestination(OpenWireDestination destination) {
this.destination = destination;
}
/**
* @openwire:property version=1
*/
public String getSelector() {
return selector;
}
public void setSelector(String selector) {
this.selector = selector;
}
/**
* @openwire:property version=1
*/
public String getSubcriptionName() {
return subscriptionName;
}
/**
 * @param subscriptionName the durable subscription name to set.
 */
public void setSubcriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
}
public String getSubscriptionName() {
return subscriptionName;
}
public void setSubscriptionName(String subscriptionName) {
this.subscriptionName = subscriptionName;
}
@Override
public boolean isMarshallAware() {
return false;
}
@Override
public String toString() {
return getClass().getSimpleName() + " { " + destination + " }";
}
@Override
public int hashCode() {
int h1 = clientId != null ? clientId.hashCode() : -1;
int h2 = subscriptionName != null ? subscriptionName.hashCode() : -1;
return h1 ^ h2;
}
@Override
public boolean equals(Object obj) {
boolean result = false;
if (obj instanceof SubscriptionInfo) {
SubscriptionInfo other = (SubscriptionInfo) obj;
result = (clientId == null && other.clientId == null ||
clientId != null && other.clientId != null && clientId.equals(other.clientId)) &&
(subscriptionName == null && other.subscriptionName == null ||
subscriptionName != null && other.subscriptionName != null && subscriptionName.equals(other.subscriptionName));
}
return result;
}
/**
 * The destination the client originally subscribed to. This may not match the
 * {@link #getDestination()} value if the subscribed destination uses patterns or composites.
 *
 * If the subscribed destination is not set, this just returns the destination.
*
* @openwire:property version=3
*/
public OpenWireDestination getSubscribedDestination() {
if (subscribedDestination == null) {
return getDestination();
}
return subscribedDestination;
}
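/*
 * Illustrative sketch (assumption, not from the original source): with a
 * wildcard subscription to "price.>" the resolved destination might be the
 * concrete topic "price.stock.IBM", while getSubscribedDestination() still
 * returns the original "price.>" pattern.
 */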
public void setSubscribedDestination(OpenWireDestination subscribedDestination) {
this.subscribedDestination = subscribedDestination;
}
/**
* @openwire:property version=11
*/
public boolean isNoLocal() {
return noLocal;
}
public void setNoLocal(boolean noLocal) {
this.noLocal = noLocal;
}
}
| 1,516 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/JournalTopicAck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="50"
*/
@OpenWireType(typeCode = 50)
public class JournalTopicAck implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.JOURNAL_ACK;
@OpenWireProperty(version = 1, sequence = 1)
protected OpenWireDestination destination;
@OpenWireProperty(version = 1, sequence = 2)
protected MessageId messageId;
@OpenWireProperty(version = 1, sequence = 3)
protected long messageSequenceId;
@OpenWireProperty(version = 1, sequence = 4)
protected String subscritionName;
@OpenWireProperty(version = 1, sequence = 5)
protected String clientId;
@OpenWireProperty(version = 1, sequence = 6)
protected TransactionId transactionId;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
public OpenWireDestination getDestination() {
return destination;
}
public void setDestination(OpenWireDestination destination) {
this.destination = destination;
}
/**
* @openwire:property version=1
*/
public MessageId getMessageId() {
return messageId;
}
public void setMessageId(MessageId messageId) {
this.messageId = messageId;
}
/**
* @openwire:property version=1
*/
public long getMessageSequenceId() {
return messageSequenceId;
}
public void setMessageSequenceId(long messageSequenceId) {
this.messageSequenceId = messageSequenceId;
}
/**
* @openwire:property version=1
*/
public String getSubscritionName() {
return subscritionName;
}
public void setSubscritionName(String subscritionName) {
this.subscritionName = subscritionName;
}
/**
* @openwire:property version=1
*/
public String getClientId() {
return clientId;
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
/**
* @openwire:property version=1
*/
public TransactionId getTransactionId() {
return transactionId;
}
public void setTransactionId(TransactionId transaction) {
this.transactionId = transaction;
}
@Override
public boolean isMarshallAware() {
return false;
}
@Override
public String toString() {
return getClass().getSimpleName() + "{ " + destination + " }";
}
}
| 1,517 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireTextMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
import org.apache.activemq.openwire.codec.OpenWireFormat;
import org.apache.activemq.openwire.utils.IOExceptionSupport;
import org.apache.activemq.openwire.utils.OpenWireMarshallingSupport;
@OpenWireType(typeCode = 28)
public class OpenWireTextMessage extends OpenWireMessage {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_TEXT_MESSAGE;
@OpenWireExtension(serialized = true)
protected String text;
@Override
public OpenWireTextMessage copy() {
OpenWireTextMessage copy = new OpenWireTextMessage();
copy(copy);
return copy;
}
private void copy(OpenWireTextMessage copy) {
super.copy(copy);
copy.text = text;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String getMimeType() {
return "jms/text-message";
}
public void setText(String text) {
this.text = text;
setContent(null);
}
public String getText() throws IOException {
if (text == null && getContent() != null) {
text = decodeContent();
setContent(null);
}
return text;
}
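/*
 * Illustrative sketch (not part of the original class): the text body is
 * decoded lazily from the marshaled content on first access.
 *
 *   OpenWireTextMessage msg = new OpenWireTextMessage();
 *   msg.setText("hello");        // drops any previously marshaled content
 *   String body = msg.getText(); // "hello", decoding stored content if needed
 */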
private String decodeContent() throws IOException {
String text = null;
if (hasContent()) {
InputStream is = null;
try {
is = new DataByteArrayInputStream(getPayload());
DataInputStream dataIn = new DataInputStream(is);
text = OpenWireMarshallingSupport.readUTF8(dataIn);
dataIn.close();
} catch (Exception ex) {
throw IOExceptionSupport.create(ex);
} finally {
if (is != null) {
try {
is.close();
} catch (IOException e) {
// ignore
}
}
}
}
return text;
}
@Override
public void beforeMarshall(OpenWireFormat wireFormat) throws IOException {
super.beforeMarshall(wireFormat);
storeContent();
}
@Override
public void storeContentAndClear() {
storeContent();
text = null;
}
@Override
public void storeContent() {
try {
Buffer content = getContent();
if (content == null && text != null) {
DataByteArrayOutputStream bytesOut = new DataByteArrayOutputStream();
OutputStream os = bytesOut;
DataOutputStream dataOut = new DataOutputStream(os);
OpenWireMarshallingSupport.writeUTF8(dataOut, this.text);
dataOut.close();
setPayload(bytesOut.toBuffer());
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void clearMarshalledState() throws IOException {
super.clearMarshalledState();
this.text = null;
}
/**
* Clears out the message body. Clearing a message's body does not clear its
* header values or property entries. <p/>
* <P>
* If this message body was read-only, calling this method leaves the
* message body in the same state as an empty body in a newly created
* message.
*
* @throws IOException if the JMS provider fails to clear the message body
* due to some internal error.
*/
@Override
public void clearBody() throws IOException {
super.clearBody();
this.text = null;
}
@Override
public int getSize() {
if (size == 0 && content == null && text != null) {
size = DEFAULT_MINIMUM_MESSAGE_SIZE;
if (marshalledProperties != null) {
size += marshalledProperties.getLength();
}
size += text.length() * 2;
}
return super.getSize();
}
@Override
public String toString() {
String text = this.text;
if (text == null) {
try {
text = decodeContent();
} catch (IOException ex) {
// ignore and fall through with a null text body
}
}
if (text != null) {
text = OpenWireMarshallingSupport.truncate64(text);
return getClass().getSimpleName() + " { text = " + text + " }";
} else {
return getClass().getSimpleName() + " { text = null }";
}
}
}
| 1,518 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ShutdownInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
/**
* @openwire:marshaller code="11"
*/
@OpenWireType(typeCode = 11)
public class ShutdownInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.SHUTDOWN_INFO;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processShutdown(this);
}
@Override
public boolean isShutdownInfo() {
return true;
}
}
| 1,519 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireTempDestination.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Base class for the Temporary Destination types.
*
* @openwire:marshaller
*/
@OpenWireType(typeCode = 0)
public abstract class OpenWireTempDestination extends OpenWireDestination {
private static final Logger LOG = LoggerFactory.getLogger(OpenWireTempDestination.class);
@OpenWireExtension
protected transient String connectionId;
@OpenWireExtension
protected transient int sequenceId;
public OpenWireTempDestination() {
}
public OpenWireTempDestination(String name) {
super(name);
}
public OpenWireTempDestination(String connectionId, long sequenceId) {
super(connectionId + ":" + sequenceId);
}
@Override
public boolean isTemporary() {
return true;
}
@Override
public void setPhysicalName(String physicalName) {
super.setPhysicalName(physicalName);
if (!isComposite()) {
// Parse the sequenceId off the end. This can fail if the temporary destination was
// generated by another JMS system via the JMS<->JMS bridge.
int p = this.physicalName.lastIndexOf(":");
if (p >= 0) {
String seqStr = this.physicalName.substring(p + 1).trim();
if (seqStr != null && seqStr.length() > 0) {
try {
sequenceId = Integer.parseInt(seqStr);
} catch (NumberFormatException e) {
LOG.debug("Did not parse sequence Id from {}", physicalName);
}
// The rest should be the connection id.
connectionId = this.physicalName.substring(0, p);
}
}
}
}
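/*
 * Illustrative sketch (assumption, not from the original source): a physical
 * name such as "ID:host-61616-1234:1:3" splits at the last ':' into the
 * connection id "ID:host-61616-1234:1" and the sequence id 3.
 */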
/**
* @return the ConnectionId that created this Temporary Destination
*/
public String getConnectionId() {
return connectionId;
}
/**
* Sets the ConnectionId String for the connection that created this Temporary Destination.
*
* @param connectionId
* the ConnectionId String of the parent Connection.
*/
public void setConnectionId(String connectionId) {
this.connectionId = connectionId;
}
/**
* @return the sequence Id used to generate this Temporary Destination.
*/
public int getSequenceId() {
return sequenceId;
}
}
| 1,520 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/PartialCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* Represents a partial command; a large command that has been split up into
* pieces.
*
* @openwire:marshaller code="60"
*/
@OpenWireType(typeCode = 60)
public class PartialCommand implements Command {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.PARTIAL_COMMAND;
@OpenWireProperty(version = 1, sequence = 1)
private int commandId;
@OpenWireProperty(version = 1, sequence = 1, mandatory = true)
private byte[] data;
public PartialCommand() {
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
@Override
public int getCommandId() {
return commandId;
}
@Override
public void setCommandId(int commandId) {
this.commandId = commandId;
}
/**
* The data for this part of the command
*
* @openwire:property version=1 mandatory=true
*/
public byte[] getData() {
return data;
}
public void setData(byte[] data) {
this.data = data;
}
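/*
 * Illustrative sketch (assumption, not from the original source): a transport
 * layer could accumulate the data of each PartialCommand until the
 * LastPartialCommand marker arrives, then unmarshal the concatenated bytes
 * back into the original command.
 *
 *   java.io.ByteArrayOutputStream buffer = new java.io.ByteArrayOutputStream();
 *   buffer.write(partial.getData()); // repeated for every arriving part
 */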
@Override
public Response visit(CommandVisitor visitor) throws Exception {
throw new IllegalStateException("The transport layer should filter out PartialCommand instances but received: " + this);
}
@Override
public boolean isResponseRequired() {
return false;
}
@Override
public boolean isResponse() {
return false;
}
@Override
public boolean isBrokerInfo() {
return false;
}
@Override
public boolean isMessageDispatch() {
return false;
}
@Override
public boolean isMessage() {
return false;
}
@Override
public boolean isMessageAck() {
return false;
}
@Override
public boolean isMessageDispatchNotification() {
return false;
}
@Override
public boolean isShutdownInfo() {
return false;
}
@Override
public boolean isConnectionControl() {
return false;
}
@Override
public void setResponseRequired(boolean responseRequired) {
}
@Override
public boolean isWireFormatInfo() {
return false;
}
@Override
public boolean isMarshallAware() {
return false;
}
@Override
public boolean isConnectionInfo() {
return false;
}
@Override
public boolean isSessionInfo() {
return false;
}
@Override
public boolean isProducerInfo() {
return false;
}
@Override
public boolean isConsumerInfo() {
return false;
}
@Override
public String toString() {
int size = 0;
if (data != null) {
size = data.length;
}
return "PartialCommand[id: " + commandId + " data: " + size + " byte(s)]";
}
}
| 1,521 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ProducerId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="123"
*
*/
@OpenWireType(typeCode = 123)
public class ProducerId implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.PRODUCER_ID;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected String connectionId;
@OpenWireProperty(version = 1, sequence = 2)
protected long value;
@OpenWireProperty(version = 1, sequence = 3)
protected long sessionId;
@OpenWireExtension
protected transient int hashCode;
@OpenWireExtension
protected transient String key;
@OpenWireExtension
protected transient SessionId parentId;
public ProducerId() {
}
public ProducerId(SessionId sessionId, long producerId) {
this.connectionId = sessionId.getConnectionId();
this.sessionId = sessionId.getValue();
this.value = producerId;
}
public ProducerId(ProducerId id) {
this.connectionId = id.getConnectionId();
this.sessionId = id.getSessionId();
this.value = id.getValue();
}
public ProducerId(String producerKey) {
// Parse off the producerId
int p = producerKey.lastIndexOf(":");
if (p >= 0) {
value = Long.parseLong(producerKey.substring(p + 1));
producerKey = producerKey.substring(0, p);
}
setProducerSessionKey(producerKey);
}
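/*
 * Illustrative sketch (not part of the original class): a producer key such as
 * "ID:host-1234:0:1:2" parses from the right, yielding value 2, session id 1
 * and connection id "ID:host-1234:0".
 */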
public SessionId getParentId() {
if (parentId == null) {
parentId = new SessionId(this);
}
return parentId;
}
@Override
public int hashCode() {
if (hashCode == 0) {
hashCode = connectionId.hashCode() ^ (int)sessionId ^ (int)value;
}
return hashCode;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || o.getClass() != ProducerId.class) {
return false;
}
ProducerId id = (ProducerId)o;
return sessionId == id.sessionId && value == id.value && connectionId.equals(id.connectionId);
}
/**
* @param sessionKey
*/
private void setProducerSessionKey(String sessionKey) {
// Parse off the sessionId
int p = sessionKey.lastIndexOf(":");
if (p >= 0) {
sessionId = Long.parseLong(sessionKey.substring(p + 1));
sessionKey = sessionKey.substring(0, p);
}
// The rest is the connection id
connectionId = sessionKey;
}
@Override
public String toString() {
if (key == null) {
key = connectionId + ":" + sessionId + ":" + value;
}
return key;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1 cache=true
*/
public String getConnectionId() {
return connectionId;
}
public void setConnectionId(String connectionId) {
this.connectionId = connectionId;
}
/**
* @openwire:property version=1
*/
public long getValue() {
return value;
}
public void setValue(long producerId) {
this.value = producerId;
}
/**
* @openwire:property version=1
*/
public long getSessionId() {
return sessionId;
}
public void setSessionId(long sessionId) {
this.sessionId = sessionId;
}
@Override
public boolean isMarshallAware() {
return false;
}
}
| 1,522 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireTopic.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
@OpenWireType(typeCode = 101)
public class OpenWireTopic extends OpenWireDestination {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_TOPIC;
public OpenWireTopic() {
}
public OpenWireTopic(String name) {
super(name);
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public boolean isTopic() {
return true;
}
public String getTopicName() {
return getPhysicalName();
}
@Override
public byte getDestinationType() {
return TOPIC_TYPE;
}
@Override
protected String getQualifiedPrefix() {
return TOPIC_QUALIFIED_PREFIX;
}
}
| 1,523 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireTempTopic.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
@OpenWireType(typeCode = 103)
public class OpenWireTempTopic extends OpenWireTempDestination {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_TEMP_TOPIC;
public OpenWireTempTopic() {
}
public OpenWireTempTopic(String name) {
super(name);
}
public OpenWireTempTopic(ConnectionId connectionId, long sequenceId) {
super(connectionId.getValue(), sequenceId);
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public boolean isTopic() {
return true;
}
@Override
public byte getDestinationType() {
return TEMP_TOPIC_TYPE;
}
@Override
protected String getQualifiedPrefix() {
return TEMP_TOPIC_QUALIFED_PREFIX;
}
public String getTopicName() {
return getPhysicalName();
}
}
| 1,524 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/LastPartialCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
/**
* Represents the end marker of a stream of {@link PartialCommand} instances.
*
* @openwire:marshaller code="61"
*/
@OpenWireType(typeCode = 61)
public class LastPartialCommand extends PartialCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.PARTIAL_LAST_COMMAND;
public LastPartialCommand() {
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
throw new IllegalStateException("The transport layer should filter out LastPartialCommand instances but received: " + this);
}
}
| 1,525 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireObjectMessage.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
import org.apache.activemq.openwire.codec.OpenWireFormat;
import org.apache.activemq.openwire.utils.IOExceptionSupport;
import org.apache.activemq.openwire.utils.ObjectMessageInputStream;
/**
* openwire:marshaller code="26"
*/
@OpenWireType(typeCode = 26)
public class OpenWireObjectMessage extends OpenWireMessage {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_OBJECT_MESSAGE;
static final ClassLoader ACTIVEMQ_CLASSLOADER = OpenWireObjectMessage.class.getClassLoader();
@OpenWireExtension
protected transient Serializable object;
@Override
public OpenWireObjectMessage copy() {
OpenWireObjectMessage copy = new OpenWireObjectMessage();
copy(copy);
return copy;
}
private void copy(OpenWireObjectMessage copy) {
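        // Marshal the body into the content buffer first so the copy never
        // shares the mutable object reference with this message.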
storeContent();
copy.object = null;
super.copy(copy);
}
@Override
public void storeContentAndClear() {
storeContent();
object = null;
}
@Override
public void storeContent() {
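        // Lazily serializes the transient object into the marshalled content
        // buffer; a no-op when content already exists or no object body is set.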
Buffer bodyAsBytes = getContent();
if (bodyAsBytes == null && object != null) {
try {
DataByteArrayOutputStream bytesOut = new DataByteArrayOutputStream();
OutputStream os = bytesOut;
if (isUseCompression()) {
compressed = true;
os = new DeflaterOutputStream(os);
}
DataOutputStream dataOut = new DataOutputStream(os);
ObjectOutputStream objOut = new ObjectOutputStream(dataOut);
objOut.writeObject(object);
objOut.flush();
objOut.reset();
objOut.close();
setContent(bytesOut.toBuffer());
} catch (IOException ioe) {
throw new RuntimeException(ioe.getMessage(), ioe);
}
}
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String getMimeType() {
return "jms/object-message";
}
/**
* Clears out the message body. Clearing a message's body does not clear its
* header values or property entries. <p/>
* <P>
* If this message body was read-only, calling this method leaves the
* message body in the same state as an empty body in a newly created
* message.
*
* @throws IOException if an error occurs removing the body.
*/
@Override
public void clearBody() throws IOException {
super.clearBody();
this.object = null;
}
/**
* Sets the serializable object containing this message's data. It is
* important to note that an <CODE>ObjectMessage</CODE> contains a
* snapshot of the object at the time <CODE>setObject()</CODE> is called;
* subsequent modifications of the object will have no effect on the
* <CODE>ObjectMessage</CODE> body.
*
* @param newObject the message's data
     * @throws IOException if object serialization fails or the body cannot be stored.
*/
public void setObject(Serializable newObject) throws IOException {
this.object = newObject;
setContent(null);
storeContent();
}
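    // Illustrative only: a minimal round-trip sketch of the body accessors
    // (hypothetical values):
    //
    //   OpenWireObjectMessage msg = new OpenWireObjectMessage();
    //   msg.setObject(new java.util.Date());          // snapshot serialized immediately
    //   java.io.Serializable body = msg.getObject();  // deserialized lazily from content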
/**
* Gets the serializable object containing this message's data. The default
* value is null.
*
* @return the serializable object containing this message's data
     * @throws IOException if the object cannot be deserialized from the message body.
*/
public Serializable getObject() throws IOException {
if (object == null && getContent() != null) {
try {
Buffer content = getContent();
InputStream is = new DataByteArrayInputStream(content);
if (isCompressed()) {
is = new InflaterInputStream(is);
}
DataInputStream dataIn = new DataInputStream(is);
ObjectMessageInputStream objIn = new ObjectMessageInputStream(dataIn);
try {
object = (Serializable)objIn.readObject();
} catch (ClassNotFoundException ce) {
throw IOExceptionSupport.create("Failed to build body from content. Serializable class not available to broker. Reason: " + ce, ce);
} finally {
dataIn.close();
objIn.close();
}
} catch (Exception e) {
throw IOExceptionSupport.create("Failed to build body from bytes. Reason: " + e, e);
}
}
return this.object;
}
@Override
public void beforeMarshall(OpenWireFormat wireFormat) throws IOException {
super.beforeMarshall(wireFormat);
storeContent();
}
@Override
public void clearMarshalledState() throws IOException {
super.clearMarshalledState();
this.object = null;
}
@Override
public void compress() throws IOException {
storeContent();
super.compress();
}
@Override
public String toString() {
try {
getObject();
        } catch (IOException e) {
            // Ignored: fall back to the default toString when the body cannot be deserialized.
        }
return super.toString();
}
}
| 1,526 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/CommandVisitorAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
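/**
 * A {@link CommandVisitor} implementation that returns a null Response for
 * every command type so that subclasses need override only the methods they
 * care about.
 */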
public class CommandVisitorAdapter implements CommandVisitor {
@Override
public Response processAddConnection(ConnectionInfo info) throws Exception {
return null;
}
@Override
public Response processAddConsumer(ConsumerInfo info) throws Exception {
return null;
}
@Override
public Response processAddDestination(DestinationInfo info) throws Exception {
return null;
}
@Override
public Response processAddProducer(ProducerInfo info) throws Exception {
return null;
}
@Override
public Response processAddSession(SessionInfo info) throws Exception {
return null;
}
@Override
public Response processBeginTransaction(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processBrokerInfo(BrokerInfo info) throws Exception {
return null;
}
@Override
public Response processCommitTransactionOnePhase(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processCommitTransactionTwoPhase(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processEndTransaction(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processFlush(FlushCommand command) throws Exception {
return null;
}
@Override
public Response processForgetTransaction(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processKeepAlive(KeepAliveInfo info) throws Exception {
return null;
}
@Override
public Response processMessage(Message send) throws Exception {
return null;
}
@Override
public Response processMessageAck(MessageAck ack) throws Exception {
return null;
}
@Override
public Response processMessageDispatchNotification(MessageDispatchNotification notification) throws Exception {
return null;
}
@Override
public Response processMessagePull(MessagePull pull) throws Exception {
return null;
}
@Override
public Response processPrepareTransaction(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processProducerAck(ProducerAck ack) throws Exception {
return null;
}
@Override
public Response processRecoverTransactions(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processRemoveConnection(ConnectionId id, long lastDeliveredSequenceId) throws Exception {
return null;
}
@Override
public Response processRemoveConsumer(ConsumerId id, long lastDeliveredSequenceId) throws Exception {
return null;
}
@Override
public Response processRemoveDestination(DestinationInfo info) throws Exception {
return null;
}
@Override
public Response processRemoveProducer(ProducerId id) throws Exception {
return null;
}
@Override
public Response processRemoveSession(SessionId id, long lastDeliveredSequenceId) throws Exception {
return null;
}
@Override
public Response processRemoveSubscription(RemoveSubscriptionInfo info) throws Exception {
return null;
}
@Override
public Response processRollbackTransaction(TransactionInfo info) throws Exception {
return null;
}
@Override
public Response processShutdown(ShutdownInfo info) throws Exception {
return null;
}
@Override
public Response processWireFormat(WireFormatInfo info) throws Exception {
return null;
}
@Override
public Response processMessageDispatch(MessageDispatch dispatch) throws Exception {
return null;
}
@Override
public Response processControlCommand(ControlCommand command) throws Exception {
return null;
}
@Override
public Response processConnectionControl(ConnectionControl control) throws Exception {
return null;
}
@Override
public Response processConnectionError(ConnectionError error) throws Exception {
return null;
}
@Override
public Response processConsumerControl(ConsumerControl control) throws Exception {
return null;
}
}
| 1,527 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireBytesMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import java.util.Arrays;
import java.util.zip.Deflater;
import java.util.zip.Inflater;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.DataByteArrayInputStream;
import org.apache.activemq.openwire.buffer.DataByteArrayOutputStream;
/**
 * Provides an abstraction layer around the standard OpenWireMessage object for
 * implementation of a JMS style BytesMessage instance. This class provides access
 * to the message body content via mechanisms that make it easy to wrap this object
 * and adhere to the JMS BytesMessage interface.
 *
 * @openwire:marshaller code="24"
*/
@OpenWireType(typeCode = 24)
public class OpenWireBytesMessage extends OpenWireMessage {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_BYTES_MESSAGE;
@Override
public OpenWireBytesMessage copy() {
OpenWireBytesMessage copy = new OpenWireBytesMessage();
copy(copy);
return copy;
}
private void copy(OpenWireBytesMessage copy) {
super.copy(copy);
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String getMimeType() {
return "jms/bytes-message";
}
/**
* Gets the number of bytes of the message body when the message is in
* read-only mode. The value returned can be used to allocate a byte array.
* The value returned is the entire length of the message body, regardless
* of where the pointer for reading the message is currently located.
*
* @return number of bytes in the message
*
* @throws IOException if there is an error in retrieving the body length value.
*/
public long getBodyLength() throws IOException {
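        // For a compressed body the true length is only known after inflating,
        // so delegate to getBodyBytes() which decompresses the content.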
if (compressed) {
return getBodyBytes().length;
} else if (content != null) {
return content.getLength();
} else {
return 0;
}
}
/**
* Provides a fast way to read the message contents.
*
* This method, unlike the base class getContent method will perform any
* needed decompression on a message that was received with a compressed
* payload. The complete message body will then be read and returned in
* a byte array copy. Changes to the returned byte array are not reflected
* in the underlying message contents.
*
* @return a copy of the message contents, uncompressed as needed.
*
* @throws IOException if an error occurs while accessing the message payload.
*/
public byte[] getBodyBytes() throws IOException {
Buffer data = getPayload();
if (data == null) {
data = new Buffer(new byte[] {}, 0, 0);
}
return data.toByteArray();
}
/**
* Set the contents of this message.
*
* This method will, unlike the base class setContent method perform any
* necessary compression of the given bytes if the message is configured for
* message compression.
*
* @param bytes
* the new byte array to use to fill the message body.
*/
public void setBodyBytes(byte[] bytes) {
setBodyBytes(new Buffer(bytes));
}
/**
* Set the contents of this message.
*
* This method will, unlike the base class setContent method perform any
* necessary compression of the given bytes if the message is configured for
* message compression.
*
* @param buffer
* the new byte Buffer to use to fill the message body.
*/
public void setBodyBytes(Buffer buffer) {
try {
setPayload(buffer);
} catch (Exception ioe) {
throw new RuntimeException(ioe.getMessage(), ioe);
}
}
@Override
public String toString() {
return "OpenWireBytesMessage";
}
@Override
protected Buffer doDecompress() throws IOException {
Buffer compressed = getContent();
Inflater inflater = new Inflater();
DataByteArrayOutputStream decompressed = new DataByteArrayOutputStream();
try {
// Copy to avoid race on concurrent reads of compressed message payload.
compressed = new Buffer(compressed);
DataByteArrayInputStream compressedIn = new DataByteArrayInputStream(compressed);
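            // The first four bytes of a compressed body hold the uncompressed
            // length, written ahead of the deflated data by doCompress() below.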
int length = compressedIn.readInt();
compressedIn.close();
byte[] data = Arrays.copyOfRange(compressed.getData(), 4, compressed.getLength());
inflater.setInput(data);
byte[] buffer = new byte[length];
int count = inflater.inflate(buffer);
decompressed.write(buffer, 0, count);
return decompressed.toBuffer();
} catch (Exception e) {
throw new IOException(e);
} finally {
inflater.end();
decompressed.close();
}
}
@Override
protected void doCompress() throws IOException {
compressed = true;
Buffer bytes = getContent();
if (bytes != null) {
int length = bytes.getLength();
DataByteArrayOutputStream compressed = new DataByteArrayOutputStream();
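            // Reserve four bytes for the uncompressed length; it is back-filled
            // below via writeInt(0, length) once deflation has completed.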
compressed.write(new byte[4]);
Deflater deflater = new Deflater();
try {
                // Bound the deflater input to the buffer's view rather than the whole backing array.
                deflater.setInput(bytes.getData(), bytes.getOffset(), bytes.getLength());
deflater.finish();
byte[] buffer = new byte[1024];
while (!deflater.finished()) {
int count = deflater.deflate(buffer);
compressed.write(buffer, 0, count);
}
compressed.writeInt(0, length);
setContent(compressed.toBuffer());
} finally {
deflater.end();
compressed.close();
}
}
}
}
| 1,528 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/ProducerAck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* A ProducerAck command is sent by a broker to a producer to let it know it has
* received and processed messages that it has produced. The producer will be
* flow controlled if it does not receive ProducerAck commands back from the
* broker.
*
* @openwire:marshaller code="19" version="3"
*/
@OpenWireType(typeCode = 19, version = 3)
public class ProducerAck extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.PRODUCER_ACK;
@OpenWireProperty(version = 3, sequence = 1)
protected ProducerId producerId;
    @OpenWireProperty(version = 3, sequence = 2)
protected int size;
public ProducerAck() {
}
public ProducerAck(ProducerId producerId, int size) {
this.producerId = producerId;
this.size = size;
}
public void copy(ProducerAck copy) {
super.copy(copy);
copy.producerId = producerId;
copy.size = size;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processProducerAck(this);
}
/**
* The producer id that this ack message is destined for.
*
* @openwire:property version=3
*/
public ProducerId getProducerId() {
return producerId;
}
public void setProducerId(ProducerId producerId) {
this.producerId = producerId;
}
/**
* The number of bytes that are being acked.
*
* @openwire:property version=3
*/
public int getSize() {
return size;
}
public void setSize(int size) {
this.size = size;
}
}
| 1,529 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/NetworkBridgeFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @openwire:marshaller code="91"
*/
@OpenWireType(typeCode = 91)
public class NetworkBridgeFilter implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.NETWORK_BRIDGE_FILTER;
static final Logger LOG = LoggerFactory.getLogger(NetworkBridgeFilter.class);
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected BrokerId networkBrokerId;
@OpenWireProperty(version = 10, sequence = 2)
protected int messageTTL;
@OpenWireProperty(version = 10, sequence = 3)
protected int consumerTTL;
public NetworkBridgeFilter() {
}
public NetworkBridgeFilter(ConsumerInfo consumerInfo, BrokerId networkBrokerId, int messageTTL, int consumerTTL) {
this.networkBrokerId = networkBrokerId;
this.messageTTL = messageTTL;
this.consumerTTL = consumerTTL;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public boolean isMarshallAware() {
return false;
}
// keep for backward compat with older wire formats
public int getNetworkTTL() {
return messageTTL;
}
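    // Older wire formats carried a single TTL value, so the legacy setter fans
    // it out to both the message and consumer TTL fields introduced in v10.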
public void setNetworkTTL(int networkTTL) {
messageTTL = networkTTL;
consumerTTL = networkTTL;
}
/**
* @openwire:property version=1 cache=true
*/
public BrokerId getNetworkBrokerId() {
return networkBrokerId;
}
public void setNetworkBrokerId(BrokerId remoteBrokerPath) {
this.networkBrokerId = remoteBrokerPath;
}
public void setMessageTTL(int messageTTL) {
this.messageTTL = messageTTL;
}
/**
* @openwire:property version=10
*/
public int getMessageTTL() {
return this.messageTTL;
}
public void setConsumerTTL(int consumerTTL) {
this.consumerTTL = consumerTTL;
}
/**
* @openwire:property version=10
*/
public int getConsumerTTL() {
return this.consumerTTL;
}
}
| 1,530 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/MessagePull.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* Used to pull messages on demand, the command can have a time value that indicates
* how long the Broker keeps the pull request open before returning a MessageDispatch
* with a null payload.
*
* @openwire:marshaller code="20"
*/
@OpenWireType(typeCode = 20)
public class MessagePull extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.MESSAGE_PULL;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected ConsumerId consumerId;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected OpenWireDestination destination;
@OpenWireProperty(version = 1, sequence = 3)
protected long timeout;
@OpenWireProperty(version = 3, sequence = 4)
private String correlationId;
    @OpenWireProperty(version = 3, sequence = 5)
private MessageId messageId;
@OpenWireExtension
private transient boolean tracked = false;
@OpenWireExtension
private transient int quantity = 1;
@OpenWireExtension
private transient boolean alwaysSignalDone;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processMessagePull(this);
}
/**
* Configures a message pull from the consumer information
*/
public void configure(ConsumerInfo info) {
setConsumerId(info.getConsumerId());
setDestination(info.getDestination());
}
/**
* @openwire:property version=1 cache=true
*/
public ConsumerId getConsumerId() {
return consumerId;
}
public void setConsumerId(ConsumerId consumerId) {
this.consumerId = consumerId;
}
/**
* @openwire:property version=1 cache=true
*/
public OpenWireDestination getDestination() {
return destination;
}
public void setDestination(OpenWireDestination destination) {
this.destination = destination;
}
/**
* @openwire:property version=1
*/
public long getTimeout() {
return timeout;
}
public void setTimeout(long timeout) {
this.timeout = timeout;
}
/**
* An optional correlation ID which could be used by a broker to decide which messages are pulled
* on demand from a queue for a consumer
*
* @openwire:property version=3
*/
public String getCorrelationId() {
return correlationId;
}
public void setCorrelationId(String correlationId) {
this.correlationId = correlationId;
}
/**
* An optional message ID which could be used by a broker to decide which messages are pulled
* on demand from a queue for a consumer
*
* @openwire:property version=3
*/
public MessageId getMessageId() {
return messageId;
}
public void setMessageId(MessageId messageId) {
this.messageId = messageId;
}
public void setTracked(boolean tracked) {
this.tracked = tracked;
}
public boolean isTracked() {
return this.tracked;
}
}
| 1,531 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/CommandVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
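/**
 * Visitor over the complete set of OpenWire commands; implementations receive
 * a callback per command type and return an optional Response.
 */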
public interface CommandVisitor {
Response processAddConnection(ConnectionInfo info) throws Exception;
Response processAddSession(SessionInfo info) throws Exception;
Response processAddProducer(ProducerInfo info) throws Exception;
Response processAddConsumer(ConsumerInfo info) throws Exception;
Response processRemoveConnection(ConnectionId id, long lastDeliveredSequenceId) throws Exception;
Response processRemoveSession(SessionId id, long lastDeliveredSequenceId) throws Exception;
Response processRemoveProducer(ProducerId id) throws Exception;
Response processRemoveConsumer(ConsumerId id, long lastDeliveredSequenceId) throws Exception;
Response processAddDestination(DestinationInfo info) throws Exception;
Response processRemoveDestination(DestinationInfo info) throws Exception;
Response processRemoveSubscription(RemoveSubscriptionInfo info) throws Exception;
Response processMessage(Message send) throws Exception;
Response processMessageAck(MessageAck ack) throws Exception;
Response processMessagePull(MessagePull pull) throws Exception;
Response processBeginTransaction(TransactionInfo info) throws Exception;
Response processPrepareTransaction(TransactionInfo info) throws Exception;
Response processCommitTransactionOnePhase(TransactionInfo info) throws Exception;
Response processCommitTransactionTwoPhase(TransactionInfo info) throws Exception;
Response processRollbackTransaction(TransactionInfo info) throws Exception;
Response processWireFormat(WireFormatInfo info) throws Exception;
Response processKeepAlive(KeepAliveInfo info) throws Exception;
Response processShutdown(ShutdownInfo info) throws Exception;
Response processFlush(FlushCommand command) throws Exception;
Response processBrokerInfo(BrokerInfo info) throws Exception;
Response processRecoverTransactions(TransactionInfo info) throws Exception;
Response processForgetTransaction(TransactionInfo info) throws Exception;
Response processEndTransaction(TransactionInfo info) throws Exception;
Response processMessageDispatchNotification(MessageDispatchNotification notification) throws Exception;
Response processProducerAck(ProducerAck ack) throws Exception;
Response processMessageDispatch(MessageDispatch dispatch) throws Exception;
Response processControlCommand(ControlCommand command) throws Exception;
Response processConnectionError(ConnectionError error) throws Exception;
Response processConnectionControl(ConnectionControl control) throws Exception;
Response processConsumerControl(ConsumerControl control) throws Exception;
}
| 1,532 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/DestinationInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* Used to create and destroy destinations on the broker.
*
* @openwire:marshaller code="8"
*/
@OpenWireType(typeCode = 8)
public class DestinationInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.DESTINATION_INFO;
public static final byte ADD_OPERATION_TYPE = 0;
public static final byte REMOVE_OPERATION_TYPE = 1;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected ConnectionId connectionId;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected OpenWireDestination destination;
@OpenWireProperty(version = 1, sequence = 3)
protected byte operationType;
@OpenWireProperty(version = 1, sequence = 4)
protected long timeout;
@OpenWireProperty(version = 1, sequence = 5)
protected BrokerId[] brokerPath;
public DestinationInfo() {
}
public DestinationInfo(ConnectionId connectionId, byte operationType, OpenWireDestination destination) {
this.connectionId = connectionId;
this.operationType = operationType;
this.destination = destination;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
public boolean isAddOperation() {
return operationType == ADD_OPERATION_TYPE;
}
public boolean isRemoveOperation() {
return operationType == REMOVE_OPERATION_TYPE;
}
/**
* @openwire:property version=1 cache=true
*/
public ConnectionId getConnectionId() {
return connectionId;
}
public void setConnectionId(ConnectionId connectionId) {
this.connectionId = connectionId;
}
/**
* @openwire:property version=1 cache=true
*/
public OpenWireDestination getDestination() {
return destination;
}
public void setDestination(OpenWireDestination destination) {
this.destination = destination;
}
/**
* @openwire:property version=1
*/
public byte getOperationType() {
return operationType;
}
public void setOperationType(byte operationType) {
this.operationType = operationType;
}
/**
* @openwire:property version=1
*/
public long getTimeout() {
return timeout;
}
public void setTimeout(long timeout) {
this.timeout = timeout;
}
/**
* The route of brokers the command has moved through.
*
* @openwire:property version=1 cache=true
*/
public BrokerId[] getBrokerPath() {
return brokerPath;
}
public void setBrokerPath(BrokerId[] brokerPath) {
this.brokerPath = brokerPath;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
if (isAddOperation()) {
return visitor.processAddDestination(this);
} else if (isRemoveOperation()) {
return visitor.processRemoveDestination(this);
}
throw new IOException("Unknown operation type: " + getOperationType());
}
public DestinationInfo copy() {
DestinationInfo result = new DestinationInfo();
super.copy(result);
result.connectionId = connectionId;
result.destination = destination;
result.operationType = operationType;
result.brokerPath = brokerPath;
return result;
}
}
| 1,533 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/MarshallAware.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import org.apache.activemq.openwire.codec.OpenWireFormat;
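/**
 * Optional callbacks for commands that need notification before and after
 * they are marshalled or unmarshalled by the OpenWire codec.
 */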
public interface MarshallAware {
void beforeMarshall(OpenWireFormat wireFormat) throws IOException;
void afterMarshall(OpenWireFormat wireFormat) throws IOException;
void beforeUnmarshall(OpenWireFormat wireFormat) throws IOException;
void afterUnmarshall(OpenWireFormat wireFormat) throws IOException;
}
| 1,534 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireDestination.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
import org.apache.activemq.openwire.annotations.OpenWireType;
/**
* Base Destination class used to provide most of the utilities necessary to deal
* with incoming and outgoing destination processing.
*/
@OpenWireType(typeCode = 0)
public abstract class OpenWireDestination implements DataStructure, Comparable<OpenWireDestination> {
public static final String PATH_SEPERATOR = ".";
public static final char COMPOSITE_SEPERATOR = ',';
public static final byte QUEUE_TYPE = 0x01;
public static final byte TOPIC_TYPE = 0x02;
public static final byte TEMP_MASK = 0x04;
public static final byte TEMP_TOPIC_TYPE = TOPIC_TYPE | TEMP_MASK;
public static final byte TEMP_QUEUE_TYPE = QUEUE_TYPE | TEMP_MASK;
public static final String QUEUE_QUALIFIED_PREFIX = "queue://";
public static final String TOPIC_QUALIFIED_PREFIX = "topic://";
public static final String TEMP_QUEUE_QUALIFED_PREFIX = "temp-queue://";
public static final String TEMP_TOPIC_QUALIFED_PREFIX = "temp-topic://";
public static final String TEMP_DESTINATION_NAME_PREFIX = "ID:";
@OpenWireProperty(version = 1, sequence = 1)
protected String physicalName;
@OpenWireExtension
protected transient OpenWireDestination[] compositeDestinations;
@OpenWireExtension
protected transient String[] destinationPaths;
@OpenWireExtension
protected transient boolean isPattern;
@OpenWireExtension
protected transient int hashValue;
@OpenWireExtension(serialized = true)
protected Map<String, String> options;
public OpenWireDestination() {
}
protected OpenWireDestination(String name) {
setPhysicalName(name);
}
    public OpenWireDestination(OpenWireDestination[] composites) {
setCompositeDestinations(composites);
}
// static helper methods for working with destinations
// -------------------------------------------------------------------------
public static OpenWireDestination createDestination(String name, byte defaultType) {
if (name.startsWith(QUEUE_QUALIFIED_PREFIX)) {
return new OpenWireQueue(name.substring(QUEUE_QUALIFIED_PREFIX.length()));
} else if (name.startsWith(TOPIC_QUALIFIED_PREFIX)) {
return new OpenWireTopic(name.substring(TOPIC_QUALIFIED_PREFIX.length()));
} else if (name.startsWith(TEMP_QUEUE_QUALIFED_PREFIX)) {
return new OpenWireTempQueue(name.substring(TEMP_QUEUE_QUALIFED_PREFIX.length()));
} else if (name.startsWith(TEMP_TOPIC_QUALIFED_PREFIX)) {
return new OpenWireTempTopic(name.substring(TEMP_TOPIC_QUALIFED_PREFIX.length()));
}
switch (defaultType) {
case QUEUE_TYPE:
return new OpenWireQueue(name);
case TOPIC_TYPE:
return new OpenWireTopic(name);
case TEMP_QUEUE_TYPE:
return new OpenWireTempQueue(name);
case TEMP_TOPIC_TYPE:
return new OpenWireTempTopic(name);
default:
throw new IllegalArgumentException("Invalid default destination type: " + defaultType);
}
}
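    // For example, createDestination("topic://price.stock.>", QUEUE_TYPE) ignores the
    // queue default because the prefix wins, yielding an OpenWireTopic whose name is
    // later flagged as a wildcard pattern by setPhysicalName (illustrative name).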
public static int compare(OpenWireDestination destination, OpenWireDestination destination2) {
if (destination == destination2) {
return 0;
}
if (destination == null) {
return -1;
} else if (destination2 == null) {
return 1;
} else {
if (destination.isQueue() == destination2.isQueue()) {
return destination.getPhysicalName().compareTo(destination2.getPhysicalName());
} else {
return destination.isQueue() ? -1 : 1;
}
}
}
@Override
public int compareTo(OpenWireDestination that) {
if (that == null) {
return -1;
}
return compare(this, that);
}
public boolean isComposite() {
return compositeDestinations != null;
}
public OpenWireDestination[] getCompositeDestinations() {
return compositeDestinations;
}
public void setCompositeDestinations(OpenWireDestination[] destinations) {
this.compositeDestinations = destinations;
this.destinationPaths = null;
this.hashValue = 0;
this.isPattern = false;
StringBuffer sb = new StringBuffer();
for (int i = 0; i < destinations.length; i++) {
if (i != 0) {
sb.append(COMPOSITE_SEPERATOR);
}
if (getDestinationType() == destinations[i].getDestinationType()) {
sb.append(destinations[i].getPhysicalName());
} else {
sb.append(destinations[i].getQualifiedName());
}
}
physicalName = sb.toString();
}
public String getQualifiedName() {
if (isComposite()) {
return physicalName;
}
return getQualifiedPrefix() + physicalName;
}
protected abstract String getQualifiedPrefix();
/**
* @openwire:property version=1
*/
public String getPhysicalName() {
return physicalName;
}
public void setPhysicalName(String physicalName) {
        if (physicalName == null) {
            throw new IllegalArgumentException("Invalid destination name: a non-null name is required");
        }
        physicalName = physicalName.trim();
        final int length = physicalName.length();
        if (physicalName.isEmpty()) {
            throw new IllegalArgumentException("Invalid destination name: a non-empty name is required");
        }
// options offset
int p = -1;
boolean composite = false;
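        // Single pass over the name: locate the start of any '?options' suffix and
        // classify the destination as composite (',') or wildcard ('*' / '>') on the way.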
for (int i = 0; i < length; i++) {
char c = physicalName.charAt(i);
if (c == '?') {
p = i;
break;
}
if (c == COMPOSITE_SEPERATOR) {
// won't be wild card
isPattern = false;
composite = true;
} else if (!composite && (c == '*' || c == '>')) {
isPattern = true;
}
}
// Strip off any options
if (p >= 0) {
String optstring = physicalName.substring(p + 1);
physicalName = physicalName.substring(0, p);
try {
options = parseQuery(optstring);
} catch (Exception e) {
throw new IllegalArgumentException("Invalid destination name: " + physicalName + ", it's options are not encoded properly: " + e);
}
}
this.physicalName = physicalName;
this.destinationPaths = null;
this.hashValue = 0;
if (composite) {
// Check to see if it is a composite.
Set<String> l = new HashSet<String>();
StringTokenizer iter = new StringTokenizer(physicalName, "" + COMPOSITE_SEPERATOR);
while (iter.hasMoreTokens()) {
String name = iter.nextToken().trim();
if (name.length() == 0) {
continue;
}
l.add(name);
}
compositeDestinations = new OpenWireDestination[l.size()];
int counter = 0;
for (String dest : l) {
compositeDestinations[counter++] = createDestination(dest);
}
}
}
public OpenWireDestination createDestination(String name) {
return createDestination(name, getDestinationType());
}
public String[] getDestinationPaths() {
if (destinationPaths != null) {
return destinationPaths;
}
List<String> l = new ArrayList<String>();
StringBuilder level = new StringBuilder();
final char separator = PATH_SEPERATOR.charAt(0);
for (char c : physicalName.toCharArray()) {
if (c == separator) {
l.add(level.toString());
level.delete(0, level.length());
} else {
level.append(c);
}
}
l.add(level.toString());
destinationPaths = new String[l.size()];
l.toArray(destinationPaths);
return destinationPaths;
}
public abstract byte getDestinationType();
public boolean isQueue() {
return false;
}
public boolean isTopic() {
return false;
}
public boolean isTemporary() {
return false;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
OpenWireDestination d = (OpenWireDestination)o;
return physicalName.equals(d.physicalName);
}
@Override
public int hashCode() {
if (hashValue == 0) {
hashValue = physicalName.hashCode();
}
return hashValue;
}
@Override
public String toString() {
return getQualifiedName();
}
public String getDestinationTypeAsString() {
switch (getDestinationType()) {
case QUEUE_TYPE:
return "Queue";
case TOPIC_TYPE:
return "Topic";
case TEMP_QUEUE_TYPE:
return "TempQueue";
case TEMP_TOPIC_TYPE:
return "TempTopic";
default:
throw new IllegalArgumentException("Invalid destination type: " + getDestinationType());
}
}
public Map<String, String> getOptions() {
return options;
}
@Override
public boolean isMarshallAware() {
return false;
}
public boolean isPattern() {
return isPattern;
}
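    // For example (illustrative option names), an options suffix such as
    // "consumer.prefetch=10&consumer.exclusive=true" parses below into the map
    // {consumer.prefetch=10, consumer.exclusive=true}; keys and values are URL decoded.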
    private static Map<String, String> parseQuery(String uri) throws Exception {
        if (uri == null) {
            return Collections.emptyMap();
        }
        Map<String, String> rc = new HashMap<String, String>();
        String[] parameters = uri.split("&");
        for (int i = 0; i < parameters.length; i++) {
            int p = parameters[i].indexOf("=");
            if (p >= 0) {
                String name = URLDecoder.decode(parameters[i].substring(0, p), "UTF-8");
                String value = URLDecoder.decode(parameters[i].substring(p + 1), "UTF-8");
                rc.put(name, value);
            } else {
                rc.put(parameters[i], null);
            }
        }
        return rc;
    }
}
| 1,535 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.buffer.Buffer;
import org.apache.activemq.openwire.buffer.UTF8Buffer;
import org.apache.activemq.openwire.utils.IOExceptionSupport;
/**
* Base implementation of a JMS Message object.
*
* openwire:marshaller code="23"
*/
@OpenWireType(typeCode = 23)
public class OpenWireMessage extends Message {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_MESSAGE;
@OpenWireExtension
protected transient boolean useCompression;
@OpenWireExtension
protected transient boolean nestedMapAndListAllowed;
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
     * @return String value that represents the MIME type for the OpenWireMessage type.
*/
public String getMimeType() {
return "jms/message";
}
@Override
public OpenWireMessage copy() {
OpenWireMessage copy = new OpenWireMessage();
copy(copy);
return copy;
}
protected void copy(OpenWireMessage copy) {
copy.useCompression = useCompression;
copy.nestedMapAndListAllowed = nestedMapAndListAllowed;
super.copy(copy);
}
@Override
public int hashCode() {
MessageId id = getMessageId();
if (id != null) {
return id.hashCode();
} else {
return super.hashCode();
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || o.getClass() != getClass()) {
return false;
}
OpenWireMessage msg = (OpenWireMessage) o;
MessageId oMsg = msg.getMessageId();
MessageId thisMsg = this.getMessageId();
return thisMsg != null && oMsg != null && oMsg.equals(thisMsg);
}
@Override
public void clearBody() throws IOException {
setContent(null);
}
public String getMessageIdAsString() {
MessageId messageId = this.getMessageId();
if (messageId == null) {
return null;
}
return messageId.toString();
}
public byte[] getCorrelationIdAsBytes() throws IOException {
return encodeString(this.getCorrelationId());
}
public void setCorrelationIdAsBytes(byte[] correlationId) throws IOException {
this.setCorrelationId(decodeString(correlationId));
}
/**
     * @return true if the message has data in its body, false if empty.
*/
public boolean hasContent() {
Buffer content = getContent();
if (content == null || content.isEmpty()) {
return false;
}
return true;
}
/**
* Provides a fast way to read the message contents.
*
* This method, unlike the base class getContent method will perform any needed
* decompression on a message that was received with a compressed payload.
*
     * @return the message contents, uncompressed as needed.
     *
     * @throws IOException if an error occurs while accessing the message payload.
*/
public Buffer getPayload() throws IOException {
Buffer data = getContent();
if (data == null) {
data = new Buffer(new byte[] {}, 0, 0);
} else if (isCompressed()) {
try {
return decompress();
} catch (Exception e) {
throw IOExceptionSupport.create(e);
}
}
return data;
}
/**
* Set the contents of this message.
*
* This method will, unlike the base class setContent method perform any
* necessary compression of the given bytes if the message is configured for
* message compression.
*
* @param bytes
* the new byte array to use to fill the message body.
*
* @throws IOException if an error occurs while accessing the message payload.
*/
public void setPayload(byte[] bytes) throws IOException {
setPayload(new Buffer(bytes));
}
/**
* Set the contents of this message.
*
* This method will, unlike the base class setContent method perform any
* necessary compression of the given bytes if the message is configured for
* message compression.
*
* @param buffer
* the new byte Buffer to use to fill the message body.
*
     * @throws IOException if an error occurs while accessing the message payload.
*/
public void setPayload(Buffer buffer) throws IOException {
try {
setContent(buffer);
if (isUseCompression()) {
doCompress();
}
} catch (Exception ex) {
throw IOExceptionSupport.create(ex);
}
}
/**
     * Sets the Message ID from its String form. Note that a value which cannot be
     * parsed into the producerId and producerSequenceId fields is retained only as
     * a text view of the MessageId.
*
* @param value
* the string Message ID value to assign to this message.
*
* @throws IOException if an error occurs while parsing the String to a MessageID
*/
public void setMessageId(String value) throws IOException {
if (value != null) {
try {
MessageId id = new MessageId(value);
this.setMessageId(id);
} catch (NumberFormatException e) {
MessageId id = new MessageId();
id.setTextView(value);
this.setMessageId(id);
}
} else {
this.setMessageId((MessageId)null);
}
}
/**
* This will create an object of MessageId. For it to be valid, the instance
* variable ProducerId and producerSequenceId must be initialized.
*
* @param producerId
* the ProducerId of the producer that sends this message.
* @param producerSequenceId
* the logical producer sequence Id of this message.
*
     * @throws IOException if an error occurs while setting this MessageId
*/
public void setMessageId(ProducerId producerId, long producerSequenceId) throws IOException {
MessageId id = null;
try {
id = new MessageId(producerId, producerSequenceId);
this.setMessageId(id);
} catch (Throwable e) {
throw IOExceptionSupport.create("Invalid message id '" + id + "', reason: " + e.getMessage(), e);
}
}
public boolean propertyExists(String name) throws IOException {
try {
            return (this.getProperties().containsKey(name) || getProperty(name) != null);
} catch (Exception e) {
throw IOExceptionSupport.create(e);
}
}
@SuppressWarnings("rawtypes")
public Enumeration getPropertyNames() throws IOException {
try {
Vector<String> result = new Vector<String>(this.getProperties().keySet());
return result.elements();
} catch (Exception e) {
throw IOExceptionSupport.create(e);
}
}
@Override
public void setProperty(String name, Object value) throws IOException {
setProperty(name, value, true);
}
/**
* Allows for bulk set of Message properties.
*
* The base method does not attempt to intercept and map JMS specific properties
* into the fields of the Message, this is left to any wrapper implementation that
* wishes to apply JMS like behavior to the standard OpenWireMessage object.
*
* @param properties
* the properties to set on the Message instance.
*
     * @throws IOException if an error occurs while setting the properties.
*/
public void setProperties(Map<String, ?> properties) throws IOException {
for (Map.Entry<String, ?> entry : properties.entrySet()) {
setProperty(entry.getKey(), entry.getValue());
}
}
/**
* Allows for unchecked additions to the internal message properties if desired.
*
     * This method is mainly useful for unit testing message types to ensure that get
     * methods fail on conversions from bad types.
*
* @param name
* the name of the property to set
* @param value
* the new value to assigned to the named property.
* @param checkValid
* indicates if a type validity check should be performed on the given object.
*
* @throws IOException if an error occurs while attempting to set the property value.
*/
public void setProperty(String name, Object value, boolean checkValid) throws IOException {
if (name == null || name.equals("")) {
throw new IllegalArgumentException("Property name cannot be empty or null");
}
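        // Normalize UTF8Buffer values to java.lang.String so that property
        // reads behave consistently regardless of how the value was supplied.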
if (value instanceof UTF8Buffer) {
value = value.toString();
}
if (checkValid) {
checkValidObject(value);
}
super.setProperty(name, value);
}
/**
* @return whether the Message allows for Map and List elements in its properties.
*/
public boolean isNestedMapAndListAllowed() {
return nestedMapAndListAllowed;
}
/**
* Sets whether the Message will allow for setting a Map or List instance in the
     * Message properties. By default these elements are not allowed but can be added if
* this option is set to true.
*
* @param nestedMapAndListAllowed
* true if nested Map and List instances are allowed in Message properties.
*/
public void setNestedMapAndListAllowed(boolean nestedMapAndListAllowed) {
this.nestedMapAndListAllowed = nestedMapAndListAllowed;
}
/**
* Sets whether the payload of the Message should be compressed.
*
* @param useCompression
* true if the binary payload should be compressed.
*/
public void setUseCompression(boolean useCompression) {
this.useCompression = useCompression;
}
/**
* @return true if the Message will compress the byte payload.
*/
public boolean isUseCompression() {
return useCompression;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processMessage(this);
}
@Override
public void storeContent() {
}
@Override
public void storeContentAndClear() {
storeContent();
}
/**
* Method that allows an application to inform the Message instance that it is
* about to be sent and that it should prepare its internal state for dispatch.
*
* @throws IOException if an error occurs or Message state is invalid.
*/
public void onSend() throws IOException {
}
protected void checkValidObject(Object value) throws IOException {
// TODO - We can probably remove these nested enabled check, the provider should
// do this since we are just a codec.
boolean valid = value instanceof Boolean || value instanceof Byte || value instanceof Short || value instanceof Integer || value instanceof Long;
valid = valid || value instanceof Float || value instanceof Double || value instanceof Character || value instanceof String || value == null;
if (!valid) {
if (isNestedMapAndListAllowed()) {
if (!(value instanceof Map || value instanceof List)) {
throw new IllegalArgumentException("Only objectified primitive objects, String, Map and List types are allowed but was: " + value + " type: " + value.getClass());
}
} else {
throw new IllegalArgumentException("Only objectified primitive objects and String types are allowed but was: " + value + " type: " + value.getClass());
}
}
}
protected static String decodeString(byte[] data) throws IOException {
try {
if (data == null) {
return null;
}
return new String(data, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw IOExceptionSupport.create("Invalid UTF-8 encoding: " + e.getMessage(), e);
}
}
protected static byte[] encodeString(String data) throws IOException {
try {
if (data == null) {
return null;
}
return data.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw IOExceptionSupport.create("Invalid UTF-8 encoding: " + e.getMessage(), e);
}
}
}
| 1,536 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/JournalTransaction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="54"
*/
@OpenWireType(typeCode = 54)
public class JournalTransaction implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.JOURNAL_TRANSACTION;
public static final byte XA_PREPARE = 1;
public static final byte XA_COMMIT = 2;
public static final byte XA_ROLLBACK = 3;
public static final byte LOCAL_COMMIT = 4;
public static final byte LOCAL_ROLLBACK = 5;
@OpenWireProperty(version = 1, sequence = 1)
public TransactionId transactionId;
@OpenWireProperty(version = 1, sequence = 2)
public byte type;
@OpenWireProperty(version = 1, sequence = 3)
public boolean wasPrepared;
public JournalTransaction(byte type, TransactionId transactionId, boolean wasPrepared) {
this.type = type;
this.transactionId = transactionId;
this.wasPrepared = wasPrepared;
}
public JournalTransaction() {
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1
*/
public TransactionId getTransactionId() {
return transactionId;
}
public void setTransactionId(TransactionId transactionId) {
this.transactionId = transactionId;
}
/**
* @openwire:property version=1
*/
public byte getType() {
return type;
}
public void setType(byte type) {
this.type = type;
}
/**
* @openwire:property version=1
*/
public boolean getWasPrepared() {
return wasPrepared;
}
public void setWasPrepared(boolean wasPrepared) {
this.wasPrepared = wasPrepared;
}
@Override
public boolean isMarshallAware() {
return false;
}
@Override
public String toString() {
return getClass().getSimpleName() + " { " + transactionId + " }";
}
}
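/*
 * A minimal sketch (an illustrative addition, not part of the original source):
 * a journal entry pairs a TransactionId with one of the type codes declared
 * above; a LOCAL_COMMIT entry is never prepared.
 */
class JournalTransactionExample {
    public static void main(String[] args) {
        JournalTransaction entry = new JournalTransaction(
            JournalTransaction.LOCAL_COMMIT, new LocalTransactionId(), false);
        System.out.println(entry); // JournalTransaction { TX:null:0 }
    }
}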
| 1,537 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/OpenWireBlobMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
import org.apache.activemq.openwire.annotations.OpenWireType;
/**
* An implementation of ActiveMQ's BlobMessage for out of band BLOB transfer
*/
@OpenWireType(typeCode = 29, version = 3)
public class OpenWireBlobMessage extends OpenWireMessage {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_BLOB_MESSAGE;
public static final String BINARY_MIME_TYPE = "application/octet-stream";
@OpenWireProperty(version = 3, sequence = 1)
private String remoteBlobUrl;
@OpenWireProperty(version = 3, sequence = 2, cached = true)
private String mimeType;
@OpenWireProperty(version = 3, sequence = 3)
private boolean deletedByBroker;
@OpenWireExtension
private transient URL url;
@OpenWireExtension(serialized = true)
private String name;
@Override
public OpenWireBlobMessage copy() {
OpenWireBlobMessage copy = new OpenWireBlobMessage();
copy(copy);
return copy;
}
private void copy(OpenWireBlobMessage copy) {
super.copy(copy);
copy.setRemoteBlobUrl(getRemoteBlobUrl());
copy.setMimeType(getMimeType());
copy.setDeletedByBroker(isDeletedByBroker());
copy.setName(getName());
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=3 cache=false
*/
public String getRemoteBlobUrl() {
return remoteBlobUrl;
}
public void setRemoteBlobUrl(String remoteBlobUrl) {
this.remoteBlobUrl = remoteBlobUrl;
url = null;
}
/**
* The MIME type of the BLOB which can be used to apply different content types to messages.
*
* @openwire:property version=3 cache=true
*/
@Override
public String getMimeType() {
if (mimeType == null) {
return BINARY_MIME_TYPE;
}
return mimeType;
}
public void setMimeType(String mimeType) {
this.mimeType = mimeType;
}
public String getName() {
return name;
}
/**
* The name of the attachment which can be useful information if transmitting files over
* ActiveMQ
*/
public void setName(String name) {
this.name = name;
}
/**
* @openwire:property version=3 cache=false
*/
public boolean isDeletedByBroker() {
return deletedByBroker;
}
public void setDeletedByBroker(boolean deletedByBroker) {
this.deletedByBroker = deletedByBroker;
}
public URL getURL() throws IOException {
if (url == null && remoteBlobUrl != null) {
try {
url = new URL(remoteBlobUrl);
} catch (MalformedURLException e) {
throw new IOException(e.getMessage());
}
}
return url;
}
public void setURL(URL url) {
this.url = url;
remoteBlobUrl = url != null ? url.toExternalForm() : null;
}
}
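/*
 * A minimal usage sketch (an illustrative addition; the URL is a placeholder):
 * the remote blob URL is parsed lazily by getURL(), and getMimeType() falls
 * back to the binary default when no type has been set.
 */
class OpenWireBlobMessageExample {
    public static void main(String[] args) throws java.io.IOException {
        OpenWireBlobMessage message = new OpenWireBlobMessage();
        message.setRemoteBlobUrl("http://broker.example/blobs/1");
        System.out.println(message.getURL()); // parsed on first access
        System.out.println(message.getMimeType()); // application/octet-stream
    }
}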
| 1,538 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/LocalTransactionId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireExtension;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="111"
*
*/
@OpenWireType(typeCode = 111)
public class LocalTransactionId extends TransactionId implements Comparable<LocalTransactionId> {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.OPENWIRE_LOCAL_TRANSACTION_ID;
@OpenWireProperty(version = 1, sequence = 1)
protected long value;
@OpenWireProperty(version = 1, sequence = 2, cached = true)
protected ConnectionId connectionId;
@OpenWireExtension
private transient String transactionKey;
@OpenWireExtension
private transient int hashCode;
public LocalTransactionId() {
}
public LocalTransactionId(ConnectionId connectionId, long transactionId) {
this.connectionId = connectionId;
this.value = transactionId;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public boolean isXATransaction() {
return false;
}
@Override
public boolean isLocalTransaction() {
return true;
}
@Override
public String getTransactionKey() {
if (transactionKey == null) {
transactionKey = "TX:" + connectionId + ":" + value;
}
return transactionKey;
}
@Override
public String toString() {
return getTransactionKey();
}
@Override
public int hashCode() {
if (hashCode == 0) {
hashCode = connectionId.hashCode() ^ (int)value;
}
return hashCode;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || o.getClass() != LocalTransactionId.class) {
return false;
}
LocalTransactionId tx = (LocalTransactionId)o;
return value == tx.value && connectionId.equals(tx.connectionId);
}
/**
* Compares by connection id first, then by the local transaction value.
*
* @param o the transaction id to compare against
* @return a negative, zero, or positive value per the Comparable contract
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(LocalTransactionId o) {
int result = connectionId.compareTo(o.connectionId);
if (result == 0) {
result = Long.compare(value, o.value); // avoid int overflow on large differences
}
return result;
}
/**
* @openwire:property version=1
*/
public long getValue() {
return value;
}
public void setValue(long transactionId) {
this.value = transactionId;
}
/**
* @openwire:property version=1 cache=true
*/
public ConnectionId getConnectionId() {
return connectionId;
}
public void setConnectionId(ConnectionId connectionId) {
this.connectionId = connectionId;
}
}
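/*
 * A minimal sketch (an illustrative addition): the transaction key is built
 * lazily from the connection id and the local counter value.
 */
class LocalTransactionIdExample {
    public static void main(String[] args) {
        LocalTransactionId id = new LocalTransactionId();
        id.setValue(7);
        System.out.println(id.getTransactionKey()); // TX:null:7 (no connection id set)
    }
}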
| 1,539 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/BrokerId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="124"
*/
@OpenWireType(typeCode = 124)
public class BrokerId implements DataStructure {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.BROKER_ID;
@OpenWireProperty(version = 1, sequence = 1)
protected String value;
public BrokerId() {
}
public BrokerId(String brokerId) {
this.value = brokerId;
}
@Override
public int hashCode() {
return value.hashCode();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || o.getClass() != BrokerId.class) {
return false;
}
BrokerId id = (BrokerId)o;
return value.equals(id.value);
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
@Override
public String toString() {
return value;
}
/**
* @openwire:property version=1
*/
public String getValue() {
return value;
}
public void setValue(String brokerId) {
this.value = brokerId;
}
@Override
public boolean isMarshallAware() {
return false;
}
}
| 1,540 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/commands/SessionInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.commands;
import org.apache.activemq.openwire.annotations.OpenWireType;
import org.apache.activemq.openwire.annotations.OpenWireProperty;
/**
* @openwire:marshaller code="4"
*/
@OpenWireType(typeCode = 4)
public class SessionInfo extends BaseCommand {
public static final byte DATA_STRUCTURE_TYPE = CommandTypes.SESSION_INFO;
@OpenWireProperty(version = 1, sequence = 1, cached = true)
protected SessionId sessionId;
public SessionInfo() {
sessionId = new SessionId();
}
public SessionInfo(ConnectionInfo connectionInfo, long sessionId) {
this.sessionId = new SessionId(connectionInfo.getConnectionId(), sessionId);
}
public SessionInfo(SessionId sessionId) {
this.sessionId = sessionId;
}
@Override
public byte getDataStructureType() {
return DATA_STRUCTURE_TYPE;
}
/**
* @openwire:property version=1 cache=true
*/
public SessionId getSessionId() {
return sessionId;
}
public void setSessionId(SessionId sessionId) {
this.sessionId = sessionId;
}
public RemoveInfo createRemoveCommand() {
RemoveInfo command = new RemoveInfo(getSessionId());
command.setResponseRequired(isResponseRequired());
return command;
}
@Override
public Response visit(CommandVisitor visitor) throws Exception {
return visitor.processAddSession(this);
}
@Override
public boolean isSessionInfo() {
return true;
}
}
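/*
 * A minimal sketch (an illustrative addition): a session is torn down by
 * sending the RemoveInfo command derived from its SessionInfo.
 */
class SessionInfoExample {
    public static void main(String[] args) {
        SessionInfo info = new SessionInfo();
        RemoveInfo remove = info.createRemoveCommand();
        System.out.println(remove); // carries the SessionId of the session to remove
    }
}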
| 1,541 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/buffer/DataByteArrayOutputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.buffer;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UTFDataFormatException;
/**
* Optimized ByteArrayOutputStream
*/
public final class DataByteArrayOutputStream extends OutputStream implements DataOutput {
private static final int DEFAULT_SIZE = 2048;
protected byte buf[];
protected int pos;
/**
* Creates a new byte array output stream, with a buffer capacity of the
* specified size, in bytes.
*
* @param size
* the initial size.
* @exception IllegalArgumentException
* if size is not greater than zero.
*/
public DataByteArrayOutputStream(int size) {
if (size <= 0) {
throw new IllegalArgumentException("Invalid size: " + size);
}
buf = new byte[size];
}
public DataByteArrayOutputStream(byte buf[]) {
if (buf == null || buf.length == 0) {
throw new IllegalArgumentException("Invalid buffer");
}
this.buf = buf;
}
/**
* Creates a new byte array output stream.
*/
public DataByteArrayOutputStream() {
this(DEFAULT_SIZE);
}
/**
* start using a fresh byte array
*
* @param size
*/
public void restart(int size) {
buf = new byte[size];
pos = 0;
}
/**
* start using a fresh byte array
*/
public void restart() {
restart(DEFAULT_SIZE);
}
/**
* Get a Buffer from the stream
*
* @return the byte sequence
*/
public Buffer toBuffer() {
return new Buffer(buf, 0, pos);
}
public void write(Buffer data) throws IOException {
write(data.data, data.offset, data.length);
}
/**
* @return the underlying byte[] buffer
*/
public byte[] getData() {
return buf;
}
/**
* reset the output stream
*/
public void reset() {
pos = 0;
}
/**
* Set the current position for writing
*
* @param offset
* @throws IOException
*/
public void position(int offset) throws IOException {
ensureEnoughBuffer(offset);
pos = offset;
}
public int position() {
return pos;
}
public int size() {
return pos;
}
public void skip(int size) throws IOException {
ensureEnoughBuffer(pos + size);
pos += size;
}
//----- Implementation of OutputStream -----------------------------------//
/**
* Writes the specified byte to this byte array output stream.
*
* @param b
* the byte to be written.
* @throws IOException
*/
@Override
public void write(int b) throws IOException {
int newcount = pos + 1;
ensureEnoughBuffer(newcount);
buf[pos] = (byte) b;
pos = newcount;
}
/**
* Writes <code>len</code> bytes from the specified byte array starting at
* offset <code>off</code> to this byte array output stream.
*
* @param b
* the data.
* @param off
* the start offset in the data.
* @param len
* the number of bytes to write.
* @throws IOException
*/
@Override
public void write(byte b[], int off, int len) throws IOException {
if (len == 0) {
return;
}
int newcount = pos + len;
ensureEnoughBuffer(newcount);
System.arraycopy(b, off, buf, pos, len);
pos = newcount;
}
//----- Implementation of DataOutput -------------------------------------//
@Override
public void writeBoolean(boolean v) throws IOException {
ensureEnoughBuffer(pos + 1);
buf[pos++] = (byte) (v ? 1 : 0);
}
@Override
public void writeByte(int v) throws IOException {
ensureEnoughBuffer(pos + 1);
buf[pos++] = (byte) (v >>> 0);
}
@Override
public void writeShort(int v) throws IOException {
ensureEnoughBuffer(pos + 2);
buf[pos++] = (byte) (v >>> 8);
buf[pos++] = (byte) (v >>> 0);
}
@Override
public void writeChar(int v) throws IOException {
ensureEnoughBuffer(pos + 2);
buf[pos++] = (byte) (v >>> 8);
buf[pos++] = (byte) (v >>> 0);
}
@Override
public void writeInt(int v) throws IOException {
ensureEnoughBuffer(pos + 4);
buf[pos++] = (byte) (v >>> 24);
buf[pos++] = (byte) (v >>> 16);
buf[pos++] = (byte) (v >>> 8);
buf[pos++] = (byte) (v >>> 0);
}
@Override
public void writeLong(long v) throws IOException {
ensureEnoughBuffer(pos + 8);
buf[pos++] = (byte) (v >>> 56);
buf[pos++] = (byte) (v >>> 48);
buf[pos++] = (byte) (v >>> 40);
buf[pos++] = (byte) (v >>> 32);
buf[pos++] = (byte) (v >>> 24);
buf[pos++] = (byte) (v >>> 16);
buf[pos++] = (byte) (v >>> 8);
buf[pos++] = (byte) (v >>> 0);
}
@Override
public void writeFloat(float v) throws IOException {
writeInt(Float.floatToIntBits(v));
}
@Override
public void writeDouble(double v) throws IOException {
writeLong(Double.doubleToLongBits(v));
}
@Override
public void writeBytes(String s) throws IOException {
int length = s.length();
for (int i = 0; i < length; i++) {
write((byte) s.charAt(i));
}
}
@Override
public void writeChars(String s) throws IOException {
int length = s.length();
for (int i = 0; i < length; i++) {
int c = s.charAt(i);
write((c >>> 8) & 0xFF);
write((c >>> 0) & 0xFF);
}
}
@Override
public void writeUTF(String str) throws IOException {
int strlen = str.length();
int encodedsize = 0;
int c;
for (int i = 0; i < strlen; i++) {
c = str.charAt(i);
if ((c >= 0x0001) && (c <= 0x007F)) {
encodedsize++;
} else if (c > 0x07FF) {
encodedsize += 3;
} else {
encodedsize += 2;
}
}
if (encodedsize > 65535) {
throw new UTFDataFormatException("encoded string too long: " + encodedsize + " bytes");
}
ensureEnoughBuffer(pos + encodedsize + 2);
writeShort(encodedsize);
int i = 0;
for (i = 0; i < strlen; i++) {
c = str.charAt(i);
if (!((c >= 0x0001) && (c <= 0x007F))) {
break;
}
buf[pos++] = (byte) c;
}
for (; i < strlen; i++) {
c = str.charAt(i);
if ((c >= 0x0001) && (c <= 0x007F)) {
buf[pos++] = (byte) c;
} else if (c > 0x07FF) {
buf[pos++] = (byte) (0xE0 | ((c >> 12) & 0x0F));
buf[pos++] = (byte) (0x80 | ((c >> 6) & 0x3F));
buf[pos++] = (byte) (0x80 | ((c >> 0) & 0x3F));
} else {
buf[pos++] = (byte) (0xC0 | ((c >> 6) & 0x1F));
buf[pos++] = (byte) (0x80 | ((c >> 0) & 0x3F));
}
}
}
//----- Indexed Write Operations -----------------------------------------//
/**
* Write the given int value starting at the given index in the internal
* data buffer, if there is not enough space in the current buffer or the
* index is beyond the current buffer capacity then the size of the buffer
* is increased to fit the value.
*
* This method does not modify the tracked position for non-index writes
* which means that a subsequent write operation can overwrite the value
* written by this operation if the index given is beyond the current
* write position.
*
* @param index
* @param value
* @throws IOException
*/
public void writeInt(int index, int value) throws IOException {
ensureEnoughBuffer(index + 4);
buf[index++] = (byte) (value >>> 24);
buf[index++] = (byte) (value >>> 16);
buf[index++] = (byte) (value >>> 8);
buf[index++] = (byte) (value >>> 0);
}
//----- Internal implementation ------------------------------------------//
private void resize(int newcount) {
byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)];
System.arraycopy(buf, 0, newbuf, 0, pos);
buf = newbuf;
}
private void ensureEnoughBuffer(int newcount) {
if (newcount > buf.length) {
resize(newcount);
}
}
}
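/*
 * A minimal sketch (an illustrative addition): the indexed writeInt(int, int)
 * overload supports the common length-prefix pattern, where space is reserved
 * up front and the real size is patched in after the payload is written.
 */
class LengthPrefixExample {
    public static void main(String[] args) throws java.io.IOException {
        DataByteArrayOutputStream out = new DataByteArrayOutputStream();
        out.writeInt(0); // placeholder for the length
        out.writeUTF("payload");
        out.writeInt(0, out.size() - 4); // patch in the real payload size
        System.out.println(out.toBuffer().getLength()); // 13: 4-byte prefix + 2-byte UTF length + 7 bytes
    }
}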
| 1,542 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/buffer/UTF8Buffer.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.buffer;
import java.io.UnsupportedEncodingException;
import java.lang.ref.SoftReference;
/**
* Simple Buffer type class used to hold data that is known to be in UTF-8
* format.
*/
final public class UTF8Buffer extends Buffer {
private SoftReference<String> value = new SoftReference<String>(null);
private int hashCode;
public UTF8Buffer(Buffer other) {
super(other);
}
public UTF8Buffer(byte[] data, int offset, int length) {
super(data, offset, length);
}
public UTF8Buffer(byte[] data) {
super(data);
}
public UTF8Buffer(String input) {
super(encode(input));
}
//----- Implementations --------------------------------------------------//
@Override
public int compareTo(Buffer other) {
// Do a char comparison.. not a byte for byte comparison.
return toString().compareTo(other.toString());
}
@Override
public String toString() {
String result = value.get();
if (result == null) {
result = decode(this);
value = new SoftReference<String>(result);
}
return result;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || obj.getClass() != UTF8Buffer.class) {
return false;
}
return equals((Buffer) obj); // cast so the content-based Buffer.equals(Buffer) runs instead of recursing
}
@Override
public int hashCode() {
if (hashCode == 0) {
hashCode = super.hashCode();
}
return hashCode;
}
//----- static convenience methods ---------------------------------------//
public static final byte[] encode(String input) {
try {
return input.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("A UnsupportedEncodingException was thrown for teh UTF-8 encoding. (This should never happen)");
}
}
static public String decode(Buffer buffer) {
try {
return new String(buffer.getData(), buffer.getOffset(), buffer.getLength(), "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("A UnsupportedEncodingException was thrown for teh UTF-8 encoding. (This should never happen)");
}
}
}
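/*
 * A minimal sketch (an illustrative addition): UTF8Buffer round-trips a String
 * through its UTF-8 byte form, caching the decoded value in a SoftReference.
 */
class UTF8BufferExample {
    public static void main(String[] args) {
        UTF8Buffer buffer = new UTF8Buffer("héllo");
        System.out.println(buffer.toString()); // decoded lazily, then cached
        System.out.println(buffer.getLength()); // 6: 'é' encodes to two bytes
    }
}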
| 1,543 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/buffer/DataByteArrayInputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.buffer;
import java.io.DataInput;
import java.io.IOException;
import java.io.InputStream;
import java.io.UTFDataFormatException;
/**
* Optimized ByteArrayInputStream that can be used more than once
*/
public final class DataByteArrayInputStream extends InputStream implements DataInput {
private byte[] buf;
private int pos;
private int offset;
private int length;
/**
* Creates a <code>StoreByteArrayInputStream</code>.
*
* @param buf
* the input buffer.
*/
public DataByteArrayInputStream(byte buf[]) {
restart(buf);
}
/**
* Creates a <code>StoreByteArrayInputStream</code>.
*
* @param buffer
* the input buffer.
*/
public DataByteArrayInputStream(Buffer buffer) {
restart(buffer);
}
/**
* reset the <code>StoreByteArrayInputStream</code> to use an new Buffer
*
* @param buffer
*/
public void restart(Buffer buffer) {
this.buf = buffer.getData();
this.offset = buffer.getOffset();
this.pos = this.offset;
this.length = buffer.getLength();
}
/**
* re-start the input stream - reusing the current buffer
*
* @param size
*/
public void restart(int size) {
if (buf == null || buf.length < size) {
buf = new byte[size];
}
restart(buf);
this.length = size;
}
/**
* Creates <code>WireByteArrayInputStream</code> with a minmalist byte array
*/
public DataByteArrayInputStream() {
this(new byte[0]);
}
/**
* @return the size
*/
public int size() {
return pos - offset;
}
/**
* @return the underlying data array
*/
public byte[] getRawData() {
return buf;
}
public Buffer readBuffer(int len) {
int endpos = offset + length;
if (pos > endpos) {
return null;
}
if (pos + len > endpos) {
len = endpos - pos; // clamp to the readable remainder relative to the buffer offset
}
Buffer rc = new Buffer(buf, pos, len);
pos += len;
return rc;
}
/**
* reset the <code>StoreByteArrayInputStream</code> to use an new byte array
*
* @param newBuff
*/
public void restart(byte[] newBuff) {
buf = newBuff;
offset = 0; // a fresh array always starts at offset zero
pos = 0;
length = newBuff.length;
}
public void restart() {
offset = 0;
pos = 0;
length = buf.length;
}
public int getPos() {
return pos;
}
public void setPos(int pos) {
this.pos = pos;
}
public int getLength() {
return length;
}
public void setLength(int length) {
this.length = length;
}
public int skip(int n) {
return skipBytes(n);
}
//----- InputStream implementation ---------------------------------------//
/**
* Reads the next byte of data from this input stream. The value byte is
* returned as an <code>int</code> in the range <code>0</code> to
* <code>255</code>. If no byte is available because the end of the stream
* has been reached, the value <code>-1</code> is returned.
* <p>
* This <code>read</code> method cannot block.
*
* @return the next byte of data, or <code>-1</code> if the end of the
* stream has been reached.
*/
@Override
public int read() {
return (pos < offset + length) ? (buf[pos++] & 0xff) : -1;
}
/**
* Reads up to <code>len</code> bytes of data into an array of bytes from
* this input stream.
*
* @param b
* the buffer into which the data is read.
* @param off
* the start offset of the data.
* @param len
* the maximum number of bytes read.
* @return the total number of bytes read into the buffer, or
* <code>-1</code> if there is no more data because the end of the
* stream has been reached.
*/
@Override
public int read(byte b[], int off, int len) {
if (b == null) {
throw new NullPointerException();
}
int endpos = offset + length;
if (pos >= endpos) {
return -1;
}
if (pos + len > endpos) {
len = endpos - pos; // clamp to the readable remainder relative to the buffer offset
}
if (len <= 0) {
return 0;
}
System.arraycopy(buf, pos, b, off, len);
pos += len;
return len;
}
//----- DataInput Implementation -----------------------------------------//
/**
* @return the number of bytes that can be read from the input stream
* without blocking.
*/
@Override
public int available() {
return offset + length - pos;
}
@Override
public void readFully(byte[] b) {
read(b, 0, b.length);
}
@Override
public void readFully(byte[] b, int off, int len) {
read(b, off, len);
}
@Override
public int skipBytes(int n) {
int endpos = offset + length;
if (pos + n > endpos) {
n = endpos - pos;
}
if (n < 0) {
return 0;
}
pos += n;
return n;
}
@Override
public boolean readBoolean() {
return read() != 0;
}
@Override
public byte readByte() {
return (byte) read();
}
@Override
public int readUnsignedByte() {
return read();
}
@Override
public short readShort() {
int ch1 = read();
int ch2 = read();
return (short) ((ch1 << 8) + (ch2 << 0));
}
@Override
public int readUnsignedShort() {
int ch1 = read();
int ch2 = read();
return (ch1 << 8) + (ch2 << 0);
}
@Override
public char readChar() {
int ch1 = read();
int ch2 = read();
return (char) ((ch1 << 8) + (ch2 << 0));
}
@Override
public int readInt() {
int ch1 = read();
int ch2 = read();
int ch3 = read();
int ch4 = read();
return (ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4 << 0);
}
@Override
public long readLong() {
long rc = ((long) buf[pos++] << 56) + ((long) (buf[pos++] & 255) << 48) + ((long) (buf[pos++] & 255) << 40) + ((long) (buf[pos++] & 255) << 32);
return rc + ((long) (buf[pos++] & 255) << 24) + ((buf[pos++] & 255) << 16) + ((buf[pos++] & 255) << 8) + ((buf[pos++] & 255) << 0);
}
@Override
public float readFloat() throws IOException {
return Float.intBitsToFloat(readInt());
}
@Override
public double readDouble() throws IOException {
return Double.longBitsToDouble(readLong());
}
@Override
public String readLine() {
int start = pos;
while (pos < offset + length) {
int c = read();
if (c == '\n') {
break;
}
if (c == '\r') {
c = read();
if (c != '\n' && c != -1) {
pos--;
}
break;
}
}
return new String(buf, start, pos - start); // the third argument is a length, not an end index
}
@Override
public String readUTF() throws IOException {
int length = readUnsignedShort();
char[] characters = new char[length];
int c;
int c2;
int c3;
int count = 0;
int total = pos + length;
while (pos < total) {
c = buf[pos] & 0xff;
if (c > 127) {
break;
}
pos++;
characters[count++] = (char) c;
}
while (pos < total) {
c = buf[pos] & 0xff;
switch (c >> 4) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
pos++;
characters[count++] = (char) c;
break;
case 12:
case 13:
pos += 2;
if (pos > total) {
throw new UTFDataFormatException("bad string");
}
c2 = buf[pos - 1];
if ((c2 & 0xC0) != 0x80) {
throw new UTFDataFormatException("bad string");
}
characters[count++] = (char) (((c & 0x1F) << 6) | (c2 & 0x3F));
break;
case 14:
pos += 3;
if (pos > total) {
throw new UTFDataFormatException("bad string");
}
c2 = buf[pos - 2];
c3 = buf[pos - 1];
if (((c2 & 0xC0) != 0x80) || ((c3 & 0xC0) != 0x80)) {
throw new UTFDataFormatException("bad string");
}
characters[count++] = (char) (((c & 0x0F) << 12) | ((c2 & 0x3F) << 6) | ((c3 & 0x3F) << 0));
break;
default:
throw new UTFDataFormatException("bad string");
}
}
return new String(characters, 0, count);
}
}
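/*
 * A minimal round-trip sketch (an illustrative addition): values written with
 * DataByteArrayOutputStream are read back through DataByteArrayInputStream via
 * the shared Buffer type from this package.
 */
class DataByteArrayStreamsExample {
    public static void main(String[] args) throws java.io.IOException {
        DataByteArrayOutputStream out = new DataByteArrayOutputStream();
        out.writeInt(42);
        out.writeUTF("openwire");
        DataByteArrayInputStream in = new DataByteArrayInputStream(out.toBuffer());
        System.out.println(in.readInt()); // 42
        System.out.println(in.readUTF()); // openwire
    }
}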
| 1,544 |
0 |
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire
|
Create_ds/activemq-openwire/openwire-core/src/main/java/org/apache/activemq/openwire/buffer/Buffer.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.buffer;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import org.apache.activemq.openwire.utils.HexSupport;
/**
* Wrapper for byte[] instances used to manage marshaled data
*/
public class Buffer implements Comparable<Buffer> {
public byte[] data;
public int offset;
public int length;
public Buffer(ByteBuffer other) {
this(other.array(), other.arrayOffset()+other.position(), other.remaining());
}
public Buffer(Buffer other) {
this(other.data, other.offset, other.length);
}
public Buffer(int size) {
this(new byte[size]);
}
public Buffer(byte data[]) {
this(data, 0, data.length);
}
public Buffer(byte data[], int offset, int length) {
if (data == null) {
throw new IllegalArgumentException("byte array value cannot by null");
}
if (offset + length > data.length) {
throw new IndexOutOfBoundsException(
String.format("offset %d + length %d must be <= the data.length %d", data, length, data.length));
}
this.data = data;
this.offset = offset;
this.length = length;
}
//-----Implementation ----------------------------------------------------//
public byte[] getData() {
return data;
}
public int getLength() {
return length;
}
public int getOffset() {
return offset;
}
final public boolean isEmpty() {
return length == 0;
}
final public byte[] toByteArray() {
byte[] data = this.data;
int length = this.length;
if (length != data.length) {
byte t[] = new byte[length];
System.arraycopy(data, offset, t, 0, length);
data = t;
}
return data;
}
final public boolean equals(Buffer obj) {
byte[] data = this.data;
int offset = this.offset;
int length = this.length;
if (length != obj.length) {
return false;
}
byte[] objData = obj.data;
int objOffset = obj.offset;
for (int i = 0; i < length; i++) {
if (objData[objOffset + i] != data[offset + i]) {
return false;
}
}
return true;
}
//----- Platform overrides -----------------------------------------------//
@Override
public String toString() {
int size = length;
boolean asciiPrintable = true;
for (int i = 0; i < size; i++) {
int c = data[offset + i] & 0xFF;
if (c > 126 || c < 32) { // not a printable char
if (!(c == '\n' || c == '\r' || c == 27)) {
// except these.
asciiPrintable = false;
break;
}
}
}
if (asciiPrintable) {
char decoded[] = new char[length];
for (int i = 0; i < size; i++) {
decoded[i] = (char) (data[offset + i] & 0xFF);
}
return "ascii: " + new String(decoded);
} else {
return "hex: " + HexSupport.toHexFromBuffer(this);
}
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null || obj.getClass() != Buffer.class) {
return false;
}
return equals((Buffer) obj);
}
@Override
public int hashCode() {
byte[] data = this.data;
int offset = this.offset;
int length = this.length;
byte[] target = new byte[4];
for (int i = 0; i < length; i++) {
target[i % 4] ^= data[offset + i];
}
return target[0] << 24 | target[1] << 16 | target[2] << 8 | target[3];
}
@Override
public int compareTo(Buffer o) {
if (this == o) {
return 0;
}
byte[] data = this.data;
int offset = this.offset;
int length = this.length;
int oLength = o.length;
int oOffset = o.offset;
byte[] oData = o.data;
int minLength = Math.min(length, oLength);
if (offset == oOffset) {
int pos = offset;
int limit = minLength + offset;
while (pos < limit) {
int b1 = 0xFF & data[pos];
int b2 = 0xFF & oData[pos];
if (b1 != b2) {
return b1 - b2;
}
pos++;
}
} else {
int offset1 = offset;
int offset2 = oOffset;
while (minLength-- != 0) {
int b1 = 0xFF & data[offset1++];
int b2 = 0xFF & oData[offset2++];
if (b1 != b2) {
return b1 - b2;
}
}
}
return length - oLength;
}
//----- Utility Stream write methods -------------------------------------//
/**
* same as out.write(data, offset, length);
*/
public void writeTo(DataOutput out) throws IOException {
out.write(data, offset, length);
}
/**
* same as out.write(data, offset, length);
*/
public void writeTo(OutputStream out) throws IOException {
out.write(data, offset, length);
}
/**
* same as in.readFully(data, offset, length);
*/
public void readFrom(DataInput in) throws IOException {
in.readFully(data, offset, length);
}
/**
* same as in.read(data, offset, length);
*/
public int readFrom(InputStream in) throws IOException {
return in.read(data, offset, length);
}
}
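/*
 * A minimal sketch (an illustrative addition): Buffer equality and ordering are
 * content based, and toByteArray() only copies when offset/length do not cover
 * the whole backing array.
 */
class BufferExample {
    public static void main(String[] args) {
        byte[] backing = {1, 2, 3, 4, 5};
        Buffer whole = new Buffer(backing);
        Buffer slice = new Buffer(backing, 1, 3); // views bytes {2, 3, 4}
        System.out.println(whole.equals(slice)); // false: lengths differ
        System.out.println(whole.compareTo(slice)); // negative: 1 < 2 at the first byte
        System.out.println(slice.toByteArray().length); // 3: the sub-range is copied out
    }
}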
| 1,545 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorPlugin.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.netflix.metacat.common.server.connectors.ConnectorFactory;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.ConnectorPlugin;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
/**
* Hive plugin.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveConnectorPlugin implements ConnectorPlugin {
private static final String CONNECTOR_TYPE = "hive";
private static final HiveTypeConverter HIVE_TYPE_CONVERTER = new HiveTypeConverter();
private static final HiveConnectorInfoConverter INFO_CONVERTER_HIVE
= new HiveConnectorInfoConverter(HIVE_TYPE_CONVERTER);
/**
* {@inheritDoc}
*/
@Override
public String getType() {
return CONNECTOR_TYPE;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorFactory create(final ConnectorContext connectorContext) {
return new HiveConnectorFactory(
INFO_CONVERTER_HIVE,
connectorContext
);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTypeConverter getTypeConverter() {
return HIVE_TYPE_CONVERTER;
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorInfoConverter getInfoConverter() {
return INFO_CONVERTER_HIVE;
}
}
| 1,546 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.SpringConnectorFactory;
import com.netflix.metacat.connector.hive.configs.CacheConfig;
import com.netflix.metacat.connector.hive.configs.HiveConnectorClientConfig;
import com.netflix.metacat.connector.hive.configs.HiveConnectorConfig;
import com.netflix.metacat.connector.hive.configs.HiveConnectorFastServiceConfig;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.springframework.core.env.MapPropertySource;
import java.util.HashMap;
import java.util.Map;
/**
* HiveConnectorFactory.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorFactory extends SpringConnectorFactory {
/**
* Constructor.
*
* @param infoConverter hive info converter
* @param connectorContext connector config
*/
HiveConnectorFactory(
final HiveConnectorInfoConverter infoConverter,
final ConnectorContext connectorContext
) {
super(infoConverter, connectorContext);
final boolean useLocalMetastore = Boolean.parseBoolean(
connectorContext.getConfiguration()
.getOrDefault(HiveConfigConstants.USE_EMBEDDED_METASTORE, "false")
);
final boolean useFastHiveService = useLocalMetastore && Boolean.parseBoolean(
connectorContext.getConfiguration()
.getOrDefault(HiveConfigConstants.USE_FASTHIVE_SERVICE, "false")
);
final Map<String, Object> properties = new HashMap<>();
properties.put("useHiveFastService", useFastHiveService);
properties.put("useEmbeddedClient", useLocalMetastore);
properties.put("metacat.cache.enabled", connectorContext.getConfig().isCacheEnabled());
super.addEnvProperties(new MapPropertySource("HIVE_CONNECTOR", properties));
super.registerClazz(HiveConnectorFastServiceConfig.class,
HiveConnectorClientConfig.class, HiveConnectorConfig.class, CacheConfig.class);
super.refresh();
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorDatabaseService getDatabaseService() {
return this.ctx.getBean(HiveConnectorDatabaseService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorTableService getTableService() {
return this.ctx.getBean(HiveConnectorTableService.class);
}
/**
* {@inheritDoc}
*/
@Override
public ConnectorPartitionService getPartitionService() {
return this.ctx.getBean(HiveConnectorPartitionService.class);
}
}
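/*
 * A hypothetical configuration sketch (an illustrative addition; only the
 * HiveConfigConstants names appear in this file, the values and the map are
 * assumptions): these entries select the embedded metastore and, on top of
 * it, the fast hive service checked for in the constructor above.
 *
 *   configuration.put(HiveConfigConstants.USE_EMBEDDED_METASTORE, "true");
 *   configuration.put(HiveConfigConstants.USE_FASTHIVE_SERVICE, "true"); // only honored when embedded
 */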
| 1,547 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorTableService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorTableService;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Hive base connector base service impl.
*
* @author zhenl
* @since 1.0.0
*/
@Getter
@Slf4j
public class HiveConnectorTableService implements ConnectorTableService {
private static final String PARAMETER_EXTERNAL = "EXTERNAL";
protected final HiveConnectorInfoConverter hiveMetacatConverters;
protected final ConnectorContext connectorContext;
private final String catalogName;
private final IMetacatHiveClient metacatHiveClient;
private final HiveConnectorDatabaseService hiveConnectorDatabaseService;
private final boolean allowRenameTable;
private final boolean onRenameConvertToExternal;
/**
* Constructor.
*
* @param catalogName catalog name
* @param metacatHiveClient hive client
* @param hiveConnectorDatabaseService hive database service
* @param hiveMetacatConverters converter
* @param connectorContext the connector context
*/
public HiveConnectorTableService(
final String catalogName,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final HiveConnectorInfoConverter hiveMetacatConverters,
final ConnectorContext connectorContext
) {
this.metacatHiveClient = metacatHiveClient;
this.hiveMetacatConverters = hiveMetacatConverters;
this.hiveConnectorDatabaseService = hiveConnectorDatabaseService;
this.catalogName = catalogName;
this.allowRenameTable = Boolean.parseBoolean(
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.ALLOW_RENAME_TABLE, "false")
);
this.onRenameConvertToExternal = Boolean.parseBoolean(
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.ON_RENAME_CONVERT_TO_EXTERNAL,
"true")
);
this.connectorContext = connectorContext;
}
/**
* getTable.
*
* @param requestContext The request context
* @param name The qualified name of the resource to get
* @return table dto
*/
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), name.getTableName());
return hiveMetacatConverters.toTableInfo(name, table);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed get hive table %s", name), exception);
}
}
/**
* Create a table.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
try {
final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
updateTable(requestContext, table, tableInfo);
metacatHiveClient.createTable(table);
} catch (AlreadyExistsException exception) {
throw new TableAlreadyExistsException(tableName, exception);
} catch (MetaException | InvalidObjectException exception) {
//the NoSuchObjectException is converted into InvalidObjectException in hive client
if (exception.getMessage().startsWith(tableName.getDatabaseName())) {
throw new DatabaseNotFoundException(
QualifiedName.ofDatabase(tableName.getCatalogName(),
tableName.getDatabaseName()), exception);
} else {
// invalid table name or column definition exception
throw new InvalidMetaException(tableName, exception);
}
} catch (TException exception) {
throw new ConnectorException(String.format("Failed create hive table %s", tableName), exception);
}
}
void updateTable(
final ConnectorRequestContext requestContext,
final Table table,
final TableInfo tableInfo
) throws MetaException {
if (table.getParameters() == null || table.getParameters().isEmpty()) {
table.setParameters(Maps.newHashMap());
}
// If this is a regular table, mark it as an EXTERNAL table;
// otherwise leave the table type as is (e.g. VIRTUAL_VIEW).
if (!isVirtualView(table)) {
table.getParameters().putIfAbsent(PARAMETER_EXTERNAL, "TRUE");
} else {
validAndUpdateVirtualView(table);
}
if (tableInfo.getMetadata() != null) {
table.getParameters().putAll(tableInfo.getMetadata());
}
//no other information is needed for iceberg table
if (connectorContext.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
table.setPartitionKeys(Collections.emptyList());
log.debug("Skipping seder and set partition key to empty when updating iceberg table in hive");
return;
}
//storage
final StorageDescriptor sd = table.getSd() != null ? table.getSd() : new StorageDescriptor();
String inputFormat = null;
String outputFormat = null;
Map<String, String> sdParameters = Maps.newHashMap();
final String location =
tableInfo.getSerde() == null ? null : tableInfo.getSerde().getUri();
if (location != null) {
sd.setLocation(location);
} else if (sd.getLocation() == null) {
final String locationStr = hiveConnectorDatabaseService.get(requestContext,
QualifiedName.ofDatabase(tableInfo.getName().
getCatalogName(), tableInfo.getName().getDatabaseName())).getUri();
final Path databasePath = new Path(locationStr);
final Path targetPath = new Path(databasePath, tableInfo.getName().getTableName());
sd.setLocation(targetPath.toString());
}
if (sd.getSerdeInfo() == null) {
sd.setSerdeInfo(new SerDeInfo());
}
final SerDeInfo serdeInfo = sd.getSerdeInfo();
serdeInfo.setName(tableInfo.getName().getTableName());
final StorageInfo storageInfo = tableInfo.getSerde();
if (storageInfo != null) {
if (!Strings.isNullOrEmpty(storageInfo.getSerializationLib())) {
serdeInfo.setSerializationLib(storageInfo.getSerializationLib());
}
if (storageInfo.getSerdeInfoParameters() != null && !storageInfo.getSerdeInfoParameters().isEmpty()) {
serdeInfo.setParameters(storageInfo.getSerdeInfoParameters());
}
inputFormat = storageInfo.getInputFormat();
outputFormat = storageInfo.getOutputFormat();
if (storageInfo.getParameters() != null && !storageInfo.getParameters().isEmpty()) {
sdParameters = storageInfo.getParameters();
}
} else if (table.getSd() != null) {
final HiveStorageFormat hiveStorageFormat = this.extractHiveStorageFormat(table);
serdeInfo.setSerializationLib(hiveStorageFormat.getSerde());
serdeInfo.setParameters(ImmutableMap.of());
inputFormat = hiveStorageFormat.getInputFormat();
outputFormat = hiveStorageFormat.getOutputFormat();
}
final ImmutableList.Builder<FieldSchema> columnsBuilder = ImmutableList.builder();
final ImmutableList.Builder<FieldSchema> partitionKeysBuilder = ImmutableList.builder();
if (tableInfo.getFields() != null) {
for (FieldInfo column : tableInfo.getFields()) {
final FieldSchema field = hiveMetacatConverters.metacatToHiveField(column);
if (column.isPartitionKey()) {
partitionKeysBuilder.add(field);
} else {
columnsBuilder.add(field);
}
}
}
final ImmutableList<FieldSchema> columns = columnsBuilder.build();
if (!columns.isEmpty()) {
sd.setCols(columns);
}
if (!Strings.isNullOrEmpty(inputFormat)) {
sd.setInputFormat(inputFormat);
}
if (!Strings.isNullOrEmpty(outputFormat)) {
sd.setOutputFormat(outputFormat);
}
if (sd.getParameters() == null) {
sd.setParameters(sdParameters);
}
//partition keys
final ImmutableList<FieldSchema> partitionKeys = partitionKeysBuilder.build();
if (!partitionKeys.isEmpty()) {
table.setPartitionKeys(partitionKeys);
}
table.setSd(sd);
}
private void validAndUpdateVirtualView(final Table table) {
if (isVirtualView(table)
&& Strings.isNullOrEmpty(table.getViewOriginalText())) {
throw new MetacatBadRequestException(
String.format("Invalid view creation for %s/%s. Missing viewOriginalText",
table.getDbName(),
table.getTableName()));
}
if (Strings.isNullOrEmpty(table.getViewExpandedText())) {
// set viewExpandedText to viewOriginalText
table.setViewExpandedText(table.getViewOriginalText());
}
// set a dummy location on the view to avoid org.apache.hadoop.fs.Path issues when the view is dropped
if (Strings.isNullOrEmpty(table.getSd().getLocation())) {
table.getSd().setLocation("file://tmp/" + table.getDbName() + "/" + table.getTableName());
}
}
private boolean isVirtualView(final Table table) {
return null != table.getTableType()
&& table.getTableType().equals(TableType.VIRTUAL_VIEW.toString());
}
/**
* Delete a table with the given qualified name.
*
* @param requestContext The request context
* @param name The qualified name of the resource to delete
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
metacatHiveClient.dropTable(name.getDatabaseName(), name.getTableName());
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed delete hive table %s", name), exception);
}
}
/**
* Update a resource with the given metadata.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
final Table existingTable = hiveMetacatConverters.fromTableInfo(get(requestContext, tableInfo.getName()));
update(requestContext, existingTable, tableInfo);
}
protected void update(final ConnectorRequestContext requestContext,
final Table existingTable, final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
try {
updateTable(requestContext, existingTable, tableInfo);
metacatHiveClient.alterTable(tableName.getDatabaseName(),
tableName.getTableName(),
existingTable);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(tableName, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed update hive table %s", tableName), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : null;
for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
if (tableFilter == null || tableName.startsWith(tableFilter)) {
final QualifiedName qualifiedName =
QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
qualifiedNames.add(qualifiedName);
}
}
            //supporting sort by qualified name only
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed listNames hive table %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<TableInfo> list(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<TableInfo> tableInfos = Lists.newArrayList();
for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
                final QualifiedName qualifiedName =
                    QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), tableName);
tableInfos.add(hiveMetacatConverters.toTableInfo(name, table));
}
//supporting sort by name only
if (sort != null) {
ConnectorUtils.sort(tableInfos, sort, Comparator.comparing(p -> p.getName().getTableName()));
}
return ConnectorUtils.paginate(tableInfos, pageable);
} catch (MetaException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed list hive table %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
boolean result;
try {
result = metacatHiveClient.getTableByName(name.getDatabaseName(), name.getTableName()) != null;
} catch (NoSuchObjectException exception) {
result = false;
} catch (TException exception) {
throw new ConnectorException(String.format("Failed exists hive table %s", name), exception);
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public void rename(
final ConnectorRequestContext context,
final QualifiedName oldName,
final QualifiedName newName
) {
if (!allowRenameTable) {
throw new ConnectorException(
"Renaming tables is disabled in catalog " + catalogName, null);
}
try {
if (onRenameConvertToExternal) {
//
                // If this is a managed table (EXTERNAL=FALSE), convert it to an external table before renaming it.
// We do not want the metastore to move the location/data.
//
final Table table = metacatHiveClient.getTableByName(oldName.getDatabaseName(), oldName.getTableName());
Map<String, String> parameters = table.getParameters();
if (parameters == null) {
parameters = Maps.newHashMap();
table.setParameters(parameters);
}
if (!parameters.containsKey(PARAMETER_EXTERNAL)
|| parameters.get(PARAMETER_EXTERNAL).equalsIgnoreCase("FALSE")) {
parameters.put(PARAMETER_EXTERNAL, "TRUE");
metacatHiveClient.alterTable(oldName.getDatabaseName(), oldName.getTableName(), table);
}
}
metacatHiveClient.rename(oldName.getDatabaseName(), oldName.getTableName(),
newName.getDatabaseName(), newName.getTableName());
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(oldName, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(newName, exception);
} catch (TException exception) {
            throw new ConnectorException(
                "Failed renaming from hive table " + oldName.toString()
                    + " to hive table " + newName.toString(), exception);
}
}
private HiveStorageFormat extractHiveStorageFormat(final Table table) throws MetaException {
final StorageDescriptor descriptor = table.getSd();
if (descriptor == null) {
throw new MetaException("Table is missing storage descriptor");
}
final SerDeInfo serdeInfo = descriptor.getSerdeInfo();
if (serdeInfo == null) {
throw new MetaException(
"Table storage descriptor is missing SerDe info");
}
final String outputFormat = descriptor.getOutputFormat();
final String serializationLib = serdeInfo.getSerializationLib();
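        // A format matches only when both the output format and the SerDe library agree.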
for (HiveStorageFormat format : HiveStorageFormat.values()) {
if (format.getOutputFormat().equals(outputFormat) && format.getSerde().equals(serializationLib)) {
return format;
}
}
throw new MetaException(
String.format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib));
}
@Override
public List<QualifiedName> getTableNames(
final ConnectorRequestContext context,
final QualifiedName name,
final String filter,
@Nullable final Integer limit) {
try {
if (name.isDatabaseDefinition()) {
return metacatHiveClient.getTableNames(name.getDatabaseName(), filter, limit == null ? -1 : limit)
.stream()
.map(n -> QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), n))
.collect(Collectors.toList());
} else {
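                // No database given: scan all databases, decrementing the remaining
                // limit as table names are collected, and stop once it is exhausted.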
int limitSize = limit == null || limit < 0 ? Integer.MAX_VALUE : limit;
final List<String> databaseNames = metacatHiveClient.getAllDatabases();
final List<QualifiedName> result = Lists.newArrayList();
for (int i = 0; i < databaseNames.size() && limitSize > 0; i++) {
final String databaseName = databaseNames.get(i);
final List<String> tableNames =
metacatHiveClient.getTableNames(databaseName, filter, limitSize);
limitSize = limitSize - tableNames.size();
result.addAll(tableNames.stream()
.map(n -> QualifiedName.ofTable(name.getCatalogName(), databaseName, n))
.collect(Collectors.toList()));
}
return result;
}
} catch (TException e) {
final String message = String.format("Failed getting the table names for database %s", name);
            log.error(message, e);
            throw new ConnectorException(message, e);
}
}
}
| 1,548 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveStorageFormat.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NonNull;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat;
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
/**
* Hive storage format.
*
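 * <p>Each constant pairs a SerDe class with its input and output format classes;
 * for example, {@code HiveStorageFormat.ORC.getSerde()} returns the ORC SerDe
 * class name. This is how the table service's {@code extractHiveStorageFormat}
 * resolves a table's format from its storage descriptor.
 *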
* @author zhenl
* @since 1.0.0
*/
@Getter
@AllArgsConstructor
public enum HiveStorageFormat {
/**
* Optimized Row Columnar.
*/
ORC(OrcSerde.class.getName(),
OrcInputFormat.class.getName(),
OrcOutputFormat.class.getName()),
/**
* PARQUET.
*/
PARQUET(ParquetHiveSerDe.class.getName(),
MapredParquetInputFormat.class.getName(),
MapredParquetOutputFormat.class.getName()),
/**
* RCBINARY.
*/
RCBINARY(LazyBinaryColumnarSerDe.class.getName(),
RCFileInputFormat.class.getName(),
RCFileOutputFormat.class.getName()),
/**
* RCTEXT.
*/
RCTEXT(ColumnarSerDe.class.getName(),
RCFileInputFormat.class.getName(),
RCFileOutputFormat.class.getName()),
/**
* SEQUENCEFILE.
*/
SEQUENCEFILE(LazySimpleSerDe.class.getName(),
SequenceFileInputFormat.class.getName(),
HiveSequenceFileOutputFormat.class.getName()),
/**
* TEXTFILE.
*/
TEXTFILE(LazySimpleSerDe.class.getName(),
TextInputFormat.class.getName(),
HiveIgnoreKeyTextOutputFormat.class.getName());
@NonNull
private final String serde;
@NonNull
private final String inputFormat;
@NonNull
private final String outputFormat;
}
| 1,549 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorPartitionService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.dto.SortOrder;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorPartitionService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.sql.PartitionHolder;
import lombok.Getter;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* HiveConnectorPartitionService.
*
* @author zhenl
* @since 1.0.0
*/
@Getter
public class HiveConnectorPartitionService implements ConnectorPartitionService {
protected final ConnectorContext context;
private final String catalogName;
private final HiveConnectorInfoConverter hiveMetacatConverters;
private final IMetacatHiveClient metacatHiveClient;
/**
* Constructor.
*
* @param context connector context
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
*/
public HiveConnectorPartitionService(
final ConnectorContext context,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters
) {
this.metacatHiveClient = metacatHiveClient;
this.hiveMetacatConverters = hiveMetacatConverters;
this.catalogName = context.getCatalogName();
this.context = context;
}
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
final List<Partition> partitions = getPartitions(tableName,
partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
partitionsRequest.getSort(), partitionsRequest.getPageable());
final List<PartitionInfo> partitionInfos = new ArrayList<>();
for (Partition partition : partitions) {
partitionInfos.add(hiveMetacatConverters.toPartitionInfo(tableInfo, partition));
}
return partitionInfos;
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final TableInfo tableInfo
) {
try {
return metacatHiveClient.getPartitionCount(tableName.getDatabaseName(), tableName.getTableName());
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("Invalid metadata for " + tableName, e);
} catch (TException e) {
throw new ConnectorException(String.format("Failed get partitions count for hive table %s", tableName), e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo) {
final String filterExpression = partitionsRequest.getFilter();
final List<String> partitionIds = partitionsRequest.getPartitionNames();
List<String> names = Lists.newArrayList();
final Pageable pageable = partitionsRequest.getPageable();
try {
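            // With a filter or explicit partition ids, materialize the partitions to build
            // their names; otherwise ask the metastore for the names directly and paginate.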
if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(),
tableName.getTableName());
for (Partition partition : getPartitions(tableName, filterExpression,
partitionIds, partitionsRequest.getSort(), pageable)) {
names.add(getNameOfPartition(table, partition));
}
} else {
names = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
return ConnectorUtils.paginate(names, pageable);
}
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("Invalid metadata for " + tableName, e);
} catch (TException e) {
throw new ConnectorException(String.format("Failed get partitions keys for hive table %s", tableName), e);
}
return names;
}
private List<Partition> getPartitions(
final QualifiedName tableName,
@Nullable final String filter,
@Nullable final List<String> partitionIds,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
final String databasename = tableName.getDatabaseName();
final String tablename = tableName.getTableName();
try {
final Table table = metacatHiveClient.getTableByName(databasename, tablename);
List<Partition> partitionList = null;
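            // Prefer the filter expression; otherwise fetch by ids, falling back to all
            // partitions (still filtered by name below) when the id lookup returns nothing.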
if (!Strings.isNullOrEmpty(filter)) {
partitionList = metacatHiveClient.listPartitionsByFilter(databasename,
tablename, filter);
} else {
if (partitionIds != null) {
partitionList = metacatHiveClient.getPartitions(databasename,
tablename, partitionIds);
}
if (partitionList == null || partitionList.isEmpty()) {
partitionList = metacatHiveClient.getPartitions(databasename,
tablename, null);
}
}
final List<Partition> filteredPartitionList = Lists.newArrayList();
partitionList.forEach(partition -> {
final String partitionName = getNameOfPartition(table, partition);
if (partitionIds == null || partitionIds.contains(partitionName)) {
filteredPartitionList.add(partition);
}
});
if (sort != null) {
if (sort.getOrder() == SortOrder.DESC) {
filteredPartitionList.sort(Collections.reverseOrder());
} else {
Collections.sort(filteredPartitionList);
}
}
return ConnectorUtils.paginate(filteredPartitionList, pageable);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("Invalid metadata for " + tableName, e);
} catch (TException e) {
throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionUris(
final ConnectorRequestContext requestContext,
final QualifiedName table,
final PartitionListRequest partitionsRequest,
final TableInfo tableInfo
) {
final List<String> uris = Lists.newArrayList();
for (Partition partition : getPartitions(table, partitionsRequest.getFilter(),
partitionsRequest.getPartitionNames(), partitionsRequest.getSort(), partitionsRequest.getPageable())) {
uris.add(partition.getSd().getLocation());
}
return uris;
}
/**
     * By default (checkIfExists=true and alterIfExists=false), this method adds the provided list of partitions.
     * If a partition already exists, it is dropped first before being added.
     * If checkIfExists=false, the method adds the partitions to the table; if a partition already exists,
     * an AlreadyExistsException error is thrown.
     * If alterIfExists=true, the method updates existing partitions and adds non-existent partitions.
* If a partition in the provided partition list has all the details, then it is used. If the details are missing,
* then the table details are inherited. This is mostly for the storage information.
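     *
     * <p>A minimal illustrative call (hypothetical variable names; assumes the
     * Lombok-generated accessors on the request/response models):
     * <pre>{@code
     * final PartitionsSaveRequest request = new PartitionsSaveRequest();
     * request.setPartitions(partitionInfos); // partitions to add or update
     * request.setAlterIfExists(true);        // update existing partitions in place
     * final PartitionsSaveResponse response =
     *     partitionService.savePartitions(requestContext, tableQName, request);
     * // response.getAdded() and response.getUpdated() report the outcome
     * }</pre>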
*/
@Override
public PartitionsSaveResponse savePartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableQName,
final PartitionsSaveRequest partitionsSaveRequest
) {
final String databaseName = tableQName.getDatabaseName();
final String tableName = tableQName.getTableName();
final Table table;
try {
table = metacatHiveClient.getTableByName(databaseName, tableName);
} catch (NoSuchObjectException exception) {
throw new TableNotFoundException(tableQName, exception);
} catch (TException e) {
throw new ConnectorException(String.format("Failed getting hive table %s", tableQName), e);
}
// New partitions
final List<PartitionInfo> addedPartitionInfos = Lists.newArrayList();
final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
final List<String> partitionNames = partitionInfos.stream()
.map(part -> {
final String partitionName = part.getName().getPartitionName();
PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
return partitionName;
}).collect(Collectors.toList());
// New partition names
final List<String> addedPartitionNames = Lists.newArrayList();
// Updated partition names
final List<String> existingPartitionNames = Lists.newArrayList();
// Existing partitions
final List<PartitionHolder> existingPartitionHolders = Lists.newArrayList();
// Existing partition map
Map<String, PartitionHolder> existingPartitionMap = Collections.emptyMap();
//
// If either checkIfExists or alterIfExists is true, check to see if any of the partitions already exists.
// If it exists and if alterIfExists=false, we will drop it before adding.
// If it exists and if alterIfExists=true, we will alter it.
//
if (partitionsSaveRequest.getCheckIfExists() || partitionsSaveRequest.getAlterIfExists()) {
existingPartitionMap = getPartitionsByNames(table, partitionNames);
}
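        // Split the requested partitions into brand-new ones and already-existing ones;
        // existing ones are either altered in place or dropped and re-added, per the flags.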
for (PartitionInfo partitionInfo : partitionInfos) {
final String partitionName = partitionInfo.getName().getPartitionName();
final PartitionHolder existingPartitionHolder = existingPartitionMap.get(partitionName);
if (existingPartitionHolder == null) {
addedPartitionNames.add(partitionName);
addedPartitionInfos.add(partitionInfo);
} else {
final String partitionUri =
partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
final String existingPartitionUri = getPartitionUri(existingPartitionHolder);
if (partitionUri == null || !partitionUri.equals(existingPartitionUri)) {
existingPartitionNames.add(partitionName);
                // We need to copy the existing partition info and fill in any details missing from the request.
if (partitionInfo.getSerde() == null) {
partitionInfo.setSerde(new StorageInfo());
}
if (partitionInfo.getAudit() == null) {
partitionInfo.setAudit(new AuditInfo());
}
if (StringUtils.isBlank(partitionUri)) {
partitionInfo.getSerde().setUri(existingPartitionUri);
}
                //the partition already exists; do nothing further with it
                //unless alterIfExists is set
if (partitionsSaveRequest.getAlterIfExists()) {
if (existingPartitionHolder.getPartition() != null) {
final Partition existingPartition = existingPartitionHolder.getPartition();
partitionInfo.getSerde().setParameters(existingPartition.getParameters());
partitionInfo.getAudit().setCreatedDate(
HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getCreateTime()));
partitionInfo.getAudit().setLastModifiedDate(
HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getLastAccessTime()));
} else {
final PartitionInfo existingPartitionInfo = existingPartitionHolder.getPartitionInfo();
if (existingPartitionInfo.getSerde() != null) {
partitionInfo.getSerde()
.setParameters(existingPartitionInfo.getSerde().getParameters());
}
if (existingPartitionInfo.getAudit() != null) {
partitionInfo.getAudit()
.setCreatedDate(existingPartitionInfo.getAudit().getCreatedDate());
partitionInfo.getAudit()
.setLastModifiedDate(existingPartitionInfo.getAudit().getLastModifiedDate());
}
}
existingPartitionHolder.setPartitionInfo(partitionInfo);
existingPartitionHolders.add(existingPartitionHolder);
} else {
addedPartitionInfos.add(partitionInfo);
}
}
}
}
final Set<String> deletePartitionNames = Sets.newHashSet();
if (!partitionsSaveRequest.getAlterIfExists()) {
deletePartitionNames.addAll(existingPartitionNames);
}
if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
deletePartitionNames.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
}
addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos, existingPartitionHolders,
deletePartitionNames);
final PartitionsSaveResponse result = new PartitionsSaveResponse();
result.setAdded(addedPartitionNames);
result.setUpdated(existingPartitionNames);
return result;
}
protected void addUpdateDropPartitions(final QualifiedName tableQName,
final Table table,
final List<String> partitionNames,
final List<PartitionInfo> addedPartitionInfos,
final List<PartitionHolder> existingPartitionInfos,
final Set<String> deletePartitionNames) {
final String databaseName = table.getDbName();
final String tableName = table.getTableName();
final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableQName, table);
try {
final List<Partition> existingPartitions = existingPartitionInfos.stream()
.map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p.getPartitionInfo()))
.collect(Collectors.toList());
final List<Partition> addedPartitions = addedPartitionInfos.stream()
.map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p)).collect(Collectors.toList());
            // If alterIfExists=true, then alter partitions if they already exist
if (!existingPartitionInfos.isEmpty()) {
copyTableSdToPartitionSd(existingPartitions, table);
metacatHiveClient.alterPartitions(databaseName,
tableName, existingPartitions);
}
// Copy the storage details from the table if the partition does not contain the details.
copyTableSdToPartitionSd(addedPartitions, table);
// Drop partitions with ids in 'deletePartitionNames' and add 'addedPartitionInfos' partitions
metacatHiveClient.addDropPartitions(databaseName,
tableName, addedPartitions, Lists.newArrayList(deletePartitionNames));
} catch (NoSuchObjectException exception) {
if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
throw new PartitionNotFoundException(tableQName, "", exception);
} else {
throw new TableNotFoundException(tableQName, exception);
}
} catch (MetaException | InvalidObjectException exception) {
throw new InvalidMetaException("One or more partitions are invalid.", exception);
} catch (AlreadyExistsException e) {
throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
}
}
private String getPartitionUri(final PartitionHolder partition) {
String result = null;
if (partition.getPartition() != null) {
final Partition hivePartition = partition.getPartition();
result = hivePartition.getSd() != null ? hivePartition.getSd().getLocation() : null;
} else if (partition.getPartitionInfo() != null) {
final PartitionInfo partitionInfo = partition.getPartitionInfo();
result = partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public void deletePartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final List<String> partitionNames,
final TableInfo tableInfo
) {
try {
metacatHiveClient.dropPartitions(tableName.getDatabaseName(), tableName.getTableName(), partitionNames);
} catch (MetaException | NoSuchObjectException exception) {
throw new TableNotFoundException(tableName, exception);
} catch (InvalidObjectException e) {
throw new InvalidMetaException("One or more partitions are invalid.", e);
} catch (TException e) {
//not sure which qualified name to use here
throw new ConnectorException(String.format("Failed delete partitions for hive table %s", tableName), e);
}
}
/**
* Returns the list of partition keys.
*
* @param fields fields
* @return partition keys
*/
protected List<String> getPartitionKeys(final List<FieldSchema> fields) {
return (fields != null) ? fields.stream().map(FieldSchema::getName).collect(Collectors.toList())
: Lists.newArrayList();
}
protected Map<String, PartitionHolder> getPartitionsByNames(final Table table,
final List<String> partitionNames) {
final String databasename = table.getDbName();
final String tablename = table.getTableName();
try {
final List<Partition> partitions =
metacatHiveClient.getPartitions(databasename, tablename, partitionNames);
return partitions.stream().map(PartitionHolder::new).collect(Collectors.toMap(part -> {
try {
return Warehouse.makePartName(table.getPartitionKeys(), part.getPartition().getValues());
} catch (Exception e) {
throw new InvalidMetaException("One or more partition names are invalid.", e);
}
}, Function.identity()));
} catch (Exception e) {
throw new InvalidMetaException("One or more partition names are invalid.", e);
}
}
private void copyTableSdToPartitionSd(final List<Partition> hivePartitions, final Table table) {
//
// Update the partition info based on that of the table.
//
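        // Start from a deep copy of the table's storage descriptor and overlay any
        // partition-specific values (location, formats, parameters, SerDe details).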
for (Partition partition : hivePartitions) {
final StorageDescriptor sd = partition.getSd();
final StorageDescriptor tableSdCopy = table.getSd().deepCopy();
if (tableSdCopy.getSerdeInfo() == null) {
final SerDeInfo serDeInfo = new SerDeInfo(null, null, new HashMap<>());
tableSdCopy.setSerdeInfo(serDeInfo);
}
tableSdCopy.setLocation(sd.getLocation());
if (!Strings.isNullOrEmpty(sd.getInputFormat())) {
tableSdCopy.setInputFormat(sd.getInputFormat());
}
if (!Strings.isNullOrEmpty(sd.getOutputFormat())) {
tableSdCopy.setOutputFormat(sd.getOutputFormat());
}
if (sd.getParameters() != null && !sd.getParameters().isEmpty()) {
tableSdCopy.setParameters(sd.getParameters());
}
if (sd.getSerdeInfo() != null) {
if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) {
tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName());
}
if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib());
}
if (sd.getSerdeInfo().getParameters() != null && !sd.getSerdeInfo().getParameters().isEmpty()) {
tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters());
}
}
partition.setSd(tableSdCopy);
}
}
private String getNameOfPartition(final Table table, final Partition partition) {
try {
return Warehouse.makePartName(table.getPartitionKeys(), partition.getValues());
} catch (TException e) {
throw new InvalidMetaException("One or more partition names are invalid.", e);
}
}
}
| 1,550 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/IMetacatHiveClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.netflix.metacat.connector.hive.client.embedded.HivePrivilege;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Set;
/**
* IMetacatHiveClient.
*
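 * <p>Every operation has a default implementation that throws
 * {@link UnsupportedOperationException}; concrete clients override only the
 * operations they support. A minimal illustrative use (hypothetical client
 * instance):
 * <pre>{@code
 * final IMetacatHiveClient client = ...; // e.g. a thrift-backed implementation
 * for (String databaseName : client.getAllDatabases()) {
 *     for (String tableName : client.getAllTables(databaseName)) {
 *         final Table table = client.getTableByName(databaseName, tableName);
 *     }
 * }
 * }</pre>
 *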
* @author zhenl
* @since 1.0.0
*/
public interface IMetacatHiveClient {
/**
* Standard error message for all default implementations.
*/
String UNSUPPORTED_MESSAGE = "Not supported for this client";
/**
* Create database.
*
* @param database database metadata
     * @throws TException if the database already exists
*/
default void createDatabase(final Database database) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Drop database.
*
* @param dbName database name
     * @throws TException if the database is not found
*/
default void dropDatabase(final String dbName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Returns the database.
     *
     * @param databaseName databaseName
     * @return the database
     * @throws TException if the database is not found
*/
default Database getDatabase(final String databaseName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* List all databases.
*
* @return database list
* @throws TException exceptions
*/
default List<String> getAllDatabases() throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Get all tables.
*
     * @param databaseName databaseName
     * @return tableNames
     * @throws TException MetaException
*/
default List<String> getAllTables(final String databaseName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Get table names matching the given filter.
     *
     * @param databaseName databaseName
     * @param filter filter
     * @param limit list size
     * @return list of table names
     * @throws TException MetaException
*/
default List<String> getTableNames(final String databaseName, final String filter, final int limit)
throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Returns the table.
*
* @param databaseName databaseName
* @param tableName tableName
* @return table information
     * @throws TException if the table is not found
*/
default Table getTableByName(final String databaseName,
final String tableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Create table.
*
     * @param table table metadata
     * @throws TException if the table already exists
*/
default void createTable(final Table table) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Delete table.
*
* @param databaseName database
* @param tableName tableName
     * @throws TException if the table is not found
*/
default void dropTable(final String databaseName,
final String tableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Rename table.
*
     * @param databaseName databaseName
     * @param oldTableName oldTableName
     * @param newDatabaseName newDatabaseName
     * @param newTableName newTableName
     * @throws TException if the table is not found
     */
    default void rename(final String databaseName,
                        final String oldTableName,
                        final String newDatabaseName,
                        final String newTableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Update table.
*
* @param databaseName databaseName
* @param tableName tableName
* @param table table
     * @throws TException if the table does not exist
*/
default void alterTable(final String databaseName,
final String tableName,
final Table table) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Update database.
     *
     * @param databaseName databaseName
     * @param database database
* @throws TException if the database does not exist
*/
default void alterDatabase(final String databaseName,
final Database database) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Returns the partitions.
*
* @param databaseName databaseName
* @param tableName tableName
     * @param partitionNames partitionNames
* @return list of partitions
* @throws TException TException
*/
default List<Partition> getPartitions(final String databaseName,
final String tableName,
@Nullable final List<String> partitionNames) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Drop a list of partitions.
*
* @param databaseName databaseName
* @param tableName tableName
* @param partitionNames partitionNames
* @throws TException TException
*/
default void dropPartitions(final String databaseName,
final String tableName,
final List<String> partitionNames) throws
TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* List partitions.
*
* @param databaseName databaseName
* @param tableName tableName
* @param filter filter
* @return List of partitions
* @throws TException TException
*/
default List<Partition> listPartitionsByFilter(final String databaseName,
final String tableName,
final String filter
) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Get partition count.
*
* @param databaseName databaseName
* @param tableName tableName
* @return partition count
* @throws TException TException
*/
default int getPartitionCount(final String databaseName,
final String tableName) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
     * Get partition names.
*
* @param databaseName databaseName
* @param tableName tableName
* @return list of partition names
* @throws TException TException
*/
default List<String> getPartitionNames(final String databaseName,
final String tableName)
throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Save partitions.
*
* @param partitions partitions
* @throws TException TException
*/
default void savePartitions(final List<Partition> partitions)
throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Alter partitions.
*
* @param dbName databaseName
* @param tableName tableName
* @param partitions partitions
* @throws TException TException
*/
default void alterPartitions(final String dbName, final String tableName,
final List<Partition> partitions) throws
TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* addDropPartitions.
*
* @param dbName dbName
* @param tableName tableName
     * @param partitions partitions
* @param delPartitionNames deletePartitionNames
* @throws TException TException
*/
default void addDropPartitions(final String dbName, final String tableName,
final List<Partition> partitions,
final List<String> delPartitionNames) throws TException {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* getDatabasePrivileges.
*
* @param user user
* @param databaseName databaseName
* @return set of privilege
*/
default Set<HivePrivilege> getDatabasePrivileges(String user, String databaseName) {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* getTablePrivileges.
*
* @param user user
     * @param tableName tableName
* @return set of privilege
*/
default Set<HivePrivilege> getTablePrivileges(String user, String tableName) {
throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
}
/**
* Clean up any held resources.
*
* @throws TException TException
*/
default void shutdown() throws TException {
}
}
| 1,551 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorDatabaseService.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.server.connectors.ConnectorDatabaseService;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;
import javax.annotation.Nullable;
import java.util.Comparator;
import java.util.List;
/**
* HiveConnectorDatabaseService.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveConnectorDatabaseService implements ConnectorDatabaseService {
private final IMetacatHiveClient metacatHiveClient;
private final HiveConnectorInfoConverter hiveMetacatConverters;
/**
* Constructor.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
*/
public HiveConnectorDatabaseService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters
) {
this.metacatHiveClient = metacatHiveClient;
this.hiveMetacatConverters = hiveMetacatConverters;
}
/**
* {@inheritDoc}.
*/
@Override
public void create(final ConnectorRequestContext requestContext, final DatabaseInfo databaseInfo) {
final QualifiedName databaseName = databaseInfo.getName();
try {
this.metacatHiveClient.createDatabase(hiveMetacatConverters.fromDatabaseInfo(databaseInfo));
} catch (AlreadyExistsException exception) {
throw new DatabaseAlreadyExistsException(databaseName, exception);
} catch (MetaException | InvalidObjectException exception) {
throw new InvalidMetaException(databaseName, exception);
} catch (TException exception) {
throw new ConnectorException(
String.format("Failed creating hive database %s", databaseName), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
this.metacatHiveClient.dropDatabase(name.getDatabaseName());
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (InvalidOperationException exception) {
throw new MetacatNotSupportedException(exception.getMessage());
} catch (TException exception) {
throw new ConnectorException(String.format("Failed delete hive database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void update(final ConnectorRequestContext context, final DatabaseInfo databaseInfo) {
final QualifiedName databaseName = databaseInfo.getName();
try {
this.metacatHiveClient.alterDatabase(databaseName.getDatabaseName(),
hiveMetacatConverters.fromDatabaseInfo(databaseInfo));
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(databaseName, exception);
} catch (MetaException | InvalidObjectException exception) {
throw new InvalidMetaException(databaseName, exception);
} catch (TException exception) {
throw new ConnectorException(
String.format("Failed updating hive database %s", databaseName), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public DatabaseInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
try {
final Database database = metacatHiveClient.getDatabase(name.getDatabaseName());
if (database != null) {
return hiveMetacatConverters.toDatabaseInfo(name, database);
} else {
throw new DatabaseNotFoundException(name);
}
} catch (NoSuchObjectException exception) {
throw new DatabaseNotFoundException(name, exception);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed get hive database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public boolean exists(final ConnectorRequestContext requestContext, final QualifiedName name) {
boolean result;
try {
result = metacatHiveClient.getDatabase(name.getDatabaseName()) != null;
} catch (NoSuchObjectException exception) {
result = false;
} catch (TException exception) {
throw new ConnectorException(String.format("Failed to check hive database %s exists", name), exception);
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<QualifiedName> qualifiedNames = Lists.newArrayList();
final String databaseFilter = (prefix != null) ? prefix.getDatabaseName() : null;
for (String databaseName : metacatHiveClient.getAllDatabases()) {
final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
if (databaseFilter != null && !databaseName.startsWith(databaseFilter)) {
continue;
}
qualifiedNames.add(qualifiedName);
}
//supporting sort by qualified name only
if (sort != null) {
ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
}
return ConnectorUtils.paginate(qualifiedNames, pageable);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed listName hive database %s", name), exception);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<DatabaseInfo> list(
final ConnectorRequestContext requestContext,
final QualifiedName name,
@Nullable final QualifiedName prefix,
@Nullable final Sort sort,
@Nullable final Pageable pageable
) {
try {
final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
for (String databaseName : metacatHiveClient.getAllDatabases()) {
final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
continue;
}
databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
}
//supporting sort by name only
if (sort != null) {
ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
}
return ConnectorUtils.paginate(databaseInfos, pageable);
} catch (MetaException exception) {
throw new InvalidMetaException(name, exception);
} catch (TException exception) {
throw new ConnectorException(String.format("Failed list hive database %s", name), exception);
}
}
}
| 1,552 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive connector.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,553 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/commonview/CommonViewHandler.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.commonview;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.converters.HiveTypeConverter;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.spectator.api.Registry;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import java.util.concurrent.ExecutionException;
/**
* CommonViewHandler class.
*
* @author zhenl
*/
//TODO: if a third iceberg-table-like object is introduced, refactor these into a common iceberg-like handler
@CacheConfig(cacheNames = "metacat")
public class CommonViewHandler {
private static final Retryer<Void> RETRY_ICEBERG_TABLE_UPDATE = RetryerBuilder.<Void>newBuilder()
.retryIfExceptionOfType(TablePreconditionFailedException.class)
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
protected final ConnectorContext connectorContext;
protected final Registry registry;
/**
     * Constructor.
*
* @param connectorContext connector context
*/
public CommonViewHandler(final ConnectorContext connectorContext) {
this.connectorContext = connectorContext;
this.registry = connectorContext.getRegistry();
}
/**
     * Get common view table info.
*
* @param name Common view name
* @param tableLoc table location
* @param tableInfo table info
* @param hiveTypeConverter hive type converter
* @return table info
*/
public TableInfo getCommonViewTableInfo(final QualifiedName name,
final String tableLoc,
final TableInfo tableInfo,
final HiveTypeConverter hiveTypeConverter) {
return TableInfo.builder().name(name).auditInfo(tableInfo.getAudit())
.fields(tableInfo.getFields()).serde(tableInfo.getSerde())
.metadata(tableInfo.getMetadata()).build();
}
/**
* Update common view column comments if the provided tableInfo has updated field comments.
*
* @param tableInfo table information
* @return true if an update is done
*/
public boolean update(final TableInfo tableInfo) {
return false;
}
/**
     * Handle a common view update request using the iceberg table
     * update strategy, for common views that employ the iceberg library.
*
* @param requestContext request context
* @param directSqlTable direct sql table object
* @param tableInfo table info
* @param tableMetadataLocation the common view table metadata location.
*/
@CacheEvict(key = "'iceberg.view.' + #tableMetadataLocation", beforeInvocation = true)
public void handleUpdate(final ConnectorRequestContext requestContext,
final DirectSqlTable directSqlTable,
final TableInfo tableInfo,
final String tableMetadataLocation) {
requestContext.setIgnoreErrorsAfterUpdate(true);
final boolean viewUpdated = this.update(tableInfo);
if (viewUpdated) {
try {
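                // On a precondition failure (the metadata location changed concurrently),
                // record the latest location on the table info, re-apply the view update,
                // and rethrow so the retryer attempts the update again (up to 3 attempts).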
RETRY_ICEBERG_TABLE_UPDATE.call(() -> {
try {
directSqlTable.updateIcebergTable(tableInfo);
} catch (TablePreconditionFailedException e) {
tableInfo.getMetadata()
.put(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION, e.getMetadataLocation());
this.update(tableInfo);
throw e;
}
return null;
});
} catch (RetryException e) {
Throwables.propagate(e.getLastFailedAttempt().getExceptionCause());
} catch (ExecutionException e) {
Throwables.propagate(e.getCause());
}
} else {
directSqlTable.updateIcebergTable(tableInfo);
}
}
}
| 1,554 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/commonview/package-info.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Common view.
*
* @author zhenl
* @since 1.3.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.commonview;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,555 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveConfigConstants.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
/**
* HiveConfigConstants.
*
* @author zhenl
* @since 1.0.0
*/
public final class HiveConfigConstants {
/**
* HIVE_METASTORE_TIMEOUT.
*/
public static final String HIVE_METASTORE_TIMEOUT = "hive.metastore-timeout";
/**
* hive thrift port.
*/
public static final String THRIFT_URI = "hive.metastore.uris";
/**
* USE_EMBEDDED_METASTORE.
*/
public static final String USE_EMBEDDED_METASTORE = "hive.use.embedded.metastore";
/**
* ALLOW_RENAME_TABLE.
*/
public static final String ALLOW_RENAME_TABLE = "hive.allow-rename-table";
/**
     * USE_FASTHIVE_SERVICE.
*/
public static final String USE_FASTHIVE_SERVICE = "hive.use.embedded.fastservice";
/**
* ENABLE_AUDIT_PROCESSING.
*/
public static final String ENABLE_AUDIT_PROCESSING = "hive.use.embedded.fastservice.auditEnabled";
/**
* GET_PARTITION_DETAILS_TIMEOUT.
*/
public static final String GET_PARTITION_DETAILS_TIMEOUT = "hive.use.embedded.GetPartitionDetailsTimeout";
/**
* GET_ICEBERG_PARTITIONS_TIMEOUT.
*/
public static final String GET_ICEBERG_PARTITIONS_TIMEOUT = "hive.iceberg.GetIcebergPartitionsTimeout";
/**
* USE_FAST_DELETION.
*/
public static final String USE_FAST_DELETION = "hive.use.embedded.sql.delete.partitions";
/**
     * THREAD_POOL_SIZE.
*/
public static final String THREAD_POOL_SIZE = "hive.thread.pool.size";
/**
* USE_METASTORE_LOCAL.
*/
public static final String USE_METASTORE_LOCAL = "hive.metastore.local";
/**
* JAVAX_JDO_OPTION_NAME.
*/
public static final String JAVAX_JDO_OPTION_NAME = "javax.jdo.option.name";
/**
* JAVAX_JDO_DATASTORETIMEOUT.
*/
public static final String JAVAX_JDO_DATASTORETIMEOUT = "javax.jdo.option.DatastoreTimeout";
/**
* JAVAX_JDO_DATASTOREREADTIMEOUT.
*/
public static final String JAVAX_JDO_DATASTOREREADTIMEOUT = "javax.jdo.option.DatastoreReadTimeoutMillis";
/**
* JAVAX_JDO_DATASTOREWRITETIMEOUT.
*/
public static final String JAVAX_JDO_DATASTOREWRITETIMEOUT = "javax.jdo.option.DatastoreWriteTimeoutMillis";
/**
* JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS.
*/
public static final String JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS = "javax.jdo.PersistenceManagerFactoryClass";
/**
* JAVAX_JDO_PERSISTENCEMANAGER_FACTORY.
*/
public static final String JAVAX_JDO_PERSISTENCEMANAGER_FACTORY
= "com.netflix.metacat.connector.hive.client.embedded.HivePersistenceManagerFactory";
/**
* HIVE_METASTORE_DS_RETRY.
*/
public static final String HIVE_METASTORE_DS_RETRY = "hive.metastore.ds.retry.attempts";
/**
* HIVE_HMSHANDLER_RETRY.
*/
public static final String HIVE_HMSHANDLER_RETRY = "hive.hmshandler.retry.attempts";
/**
* HIVE_STATS_AUTOGATHER.
*/
public static final String HIVE_STATS_AUTOGATHER = "hive.stats.autogather";
/**
* DATANUCLEUS_AUTOSTARTMECHANISMMODE.
*/
public static final String DATANUCLEUS_AUTOSTARTMECHANISMMODE = "datanucleus.autoStartMechanismMode";
/**
* DATANUCLEUS_DETACHALLONCOMMIT.
*/
public static final String DATANUCLEUS_DETACHALLONCOMMIT = "datanucleus.detachAllOnCommit";
/**
* DATANUCLEUS_DETACHALLONROLLBACK.
*/
public static final String DATANUCLEUS_DETACHALLONROLLBACK = "datanucleus.detachAllOnRollback";
/**
* DATANUCLEUS_PERSISTENCYBYREACHATCOMMIT.
*/
public static final String DATANUCLEUS_PERSISTENCYBYREACHATCOMMIT = "datanucleus.persistenceByReachabilityAtCommit";
/**
* DATANUCLEUS_CACHE_LEVEL2_TYPE.
*/
public static final String DATANUCLEUS_CACHE_LEVEL2_TYPE = "datanucleus.cache.level2.type";
/**
* DATANUCLEUS_CACHE_LEVEL2.
*/
public static final String DATANUCLEUS_CACHE_LEVEL2 = "datanucleus.cache.level2";
/**
* DATANUCLEUS_VALIDATECOLUMNS.
*/
public static final String DATANUCLEUS_VALIDATECOLUMNS = "datanucleus.validateColumns";
/**
* DATANUCLEUS_VALIDATECONSTRAINTS.
*/
public static final String DATANUCLEUS_VALIDATECONSTRAINTS = "datanucleus.validateConstraints";
/**
* DATANUCLEUS_VALIDATETABLE.
*/
public static final String DATANUCLEUS_VALIDATETABLE = "datanucleus.validateTables";
/**
* DATANUCLEUS_TRANSACTIONISOLATION.
*/
public static final String DATANUCLEUS_TRANSACTIONISOLATION = "datanucleus.transactionIsolation";
/**
* DATANUCLEUS_READCOMMITTED.
*/
public static final String DATANUCLEUS_READCOMMITTED = "read-committed";
/**
* DATANUCLEUS_FIXEDDATASTORE.
*/
public static final String DATANUCLEUS_FIXEDDATASTORE = "datanucleus.fixedDatastore";
/**
* DATANUCLEUS_AUTOCREATESCHEMA.
*/
public static final String DATANUCLEUS_AUTOCREATESCHEMA = "datanucleus.autoCreateSchema";
/**
* DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS.
*/
public static final String DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS = "datanucleus.rdbms.CheckExistTablesOrViews";
/**
* DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO.
*/
public static final String DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO = "datanucleus.rdbms.initializeColumnInfo";
/**
* DATANUCLEUS_IDENTIFIERFACTORY.
*/
public static final String DATANUCLEUS_IDENTIFIERFACTORY = "datanucleus.identifierFactory";
/**
* DATANUCLEUS_DATANUCLEU1.
*/
public static final String DATANUCLEUS_DATANUCLEU1 = "datanucleus1";
/**
* DATANUCLEUS_CONNECTIONFACTORY.
*/
public static final String DATANUCLEUS_CONNECTIONFACTORY = "datanucleus.ConnectionFactory";
/**
* DATANUCLEUS_RDBMS_USELEGACYNATIVEVALUESTRATEGY.
*/
public static final String DATANUCLEUS_RDBMS_USELEGACYNATIVEVALUESTRATEGY
= "datanucleus.rdbms.useLegacyNativeValueStrategy";
/**
* HIVE_HMSHANDLER_NAME.
*/
public static final String HIVE_HMSHANDLER_NAME = "metacat";
/**
* METACAT_JDO_TIMEOUT.
*/
public static final String METACAT_JDO_TIMEOUT = "metacat.jdo.timeout";
/**
* Configuration to convert a table to external on rename table.
*/
public static final String ON_RENAME_CONVERT_TO_EXTERNAL = "metacat.on-rename-convert-to-external";
private HiveConfigConstants() {
}
}
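// Illustrative usage sketch, not part of the original file: it shows how a few
// of these keys are typically applied to a Hadoop Configuration before opening
// a metastore client. The endpoint and timeout values here are hypothetical.
class HiveConfigConstantsUsageExample {
    static org.apache.hadoop.conf.Configuration exampleConf() {
        // Start from an empty Configuration so that only the example keys are set.
        final org.apache.hadoop.conf.Configuration conf =
            new org.apache.hadoop.conf.Configuration(false);
        conf.set(HiveConfigConstants.THRIFT_URI, "thrift://localhost:9083");
        conf.set(HiveConfigConstants.USE_METASTORE_LOCAL, "false");
        conf.set(HiveConfigConstants.HIVE_METASTORE_TIMEOUT, "60s");
        return conf;
    }
}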
| 1,556 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/PartitionUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.base.Strings;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
/**
* Utility class for partitions.
* @author amajumdar
*/
public final class PartitionUtil {
private PartitionUtil() {
}
/**
     * Retrieves the partition values from the partition name. This method also validates the partition keys
     * against those of the table.
*
* @param tableQName table name
* @param table table
* @param partName partition name
* @return list of partition values
*/
public static List<String> getPartValuesFromPartName(final QualifiedName tableQName, final Table table,
final String partName) {
if (Strings.isNullOrEmpty(partName)) {
throw new InvalidMetaException(tableQName, partName, null);
}
final LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
Warehouse.makeSpecFromName(partSpec, new Path(partName));
final List<String> values = new ArrayList<>();
for (FieldSchema field : table.getPartitionKeys()) {
final String key = field.getName();
final String val = partSpec.get(key);
if (val == null) {
throw new InvalidMetaException(tableQName, partName, null);
}
values.add(val);
}
return values;
}
/**
* Escape partition name.
*
* @param partName partition name
* @return Escaped partition name
*/
public static String escapePartitionName(final String partName) {
final LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
Warehouse.makeSpecFromName(partSpec, new Path(partName));
return FileUtils.makePartName(new ArrayList<>(partSpec.keySet()), new ArrayList<>(partSpec.values()));
}
/**
* Generate partition name from the <code>partValues</code>.
*
* @param partitionKeys list of partition keys
* @param partValues list of partition values
* @return partition name
*/
public static String makePartName(final List<FieldSchema> partitionKeys, final List<String> partValues) {
try {
return Warehouse.makePartName(partitionKeys, partValues);
} catch (MetaException e) {
throw new InvalidMetaException("Failed making the part name from the partition values", e);
}
}
}
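// Illustrative usage sketch, not part of the original file. For a table
// partitioned by (dateint, hour), parsing "dateint=20170101/hour=00" yields
// ["20170101", "00"]; the order follows the table's partition keys, not the
// order within the name string. The table and qualified name are hypothetical.
class PartitionUtilUsageExample {
    static List<String> example() {
        final Table table = new Table();
        table.setPartitionKeys(new ArrayList<>(java.util.Arrays.asList(
            new FieldSchema("dateint", "string", null),
            new FieldSchema("hour", "string", null))));
        return PartitionUtil.getPartValuesFromPartName(
            QualifiedName.ofTable("prodhive", "example_db", "example_table"),
            table, "dateint=20170101/hour=00");
    }
}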
| 1,557 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveConnectorFastServiceMetric.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Timer;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
/**
* Hive Connector Fast Service Metric.
*
* @author zhenl
* @since 1.1.0
*/
@Getter
@Slf4j
public class HiveConnectorFastServiceMetric {
private final HashMap<String, Timer> timerMap = new HashMap<>();
/**
* Constructor.
*
* @param registry the spectator registry
*/
public HiveConnectorFastServiceMetric(final Registry registry) {
timerMap.put(HiveMetrics.TagGetPartitionCount.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitionCount.getMetricName()));
timerMap.put(HiveMetrics.TagGetPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagGetPartitionKeys.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitionKeys.getMetricName()));
timerMap.put(HiveMetrics.TagGetPartitionNames.getMetricName(), createTimer(registry,
HiveMetrics.TagGetPartitionNames.getMetricName()));
timerMap.put(HiveMetrics.TagTableExists.getMetricName(), createTimer(registry,
HiveMetrics.TagTableExists.getMetricName()));
timerMap.put(HiveMetrics.TagGetTableNames.getMetricName(), createTimer(registry,
HiveMetrics.TagGetTableNames.getMetricName()));
timerMap.put(HiveMetrics.TagAddPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagAddPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagAlterPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagAlterPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagDropHivePartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagDropHivePartitions.getMetricName()));
timerMap.put(HiveMetrics.TagAddDropPartitions.getMetricName(), createTimer(registry,
HiveMetrics.TagAddDropPartitions.getMetricName()));
timerMap.put(HiveMetrics.TagAlterDatabase.getMetricName(), createTimer(registry,
HiveMetrics.TagAlterDatabase.getMetricName()));
}
private Timer createTimer(final Registry registry, final String requestTag) {
final HashMap<String, String> tags = new HashMap<>();
tags.put("request", requestTag);
return registry.timer(registry.createId(HiveMetrics.TimerFastHiveRequest.getMetricName()).withTags(tags));
}
/**
     * Records the duration to the corresponding timer.
*
* @param metricName metric name.
* @param duration duration of the operation.
*/
public void recordTimer(final String metricName, final long duration) {
if (this.timerMap.containsKey(metricName)) {
log.debug("### Time taken to complete {} is {} ms", metricName, duration);
this.timerMap.get(metricName).record(duration, TimeUnit.MILLISECONDS);
} else {
log.error("Not supported metric {}", metricName);
}
}
}
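// Illustrative usage sketch, not part of the original file: time a metastore
// call and record it under one of the pre-registered request tags. Using
// Spectator's DefaultRegistry here is an assumption made for the example.
class HiveConnectorFastServiceMetricUsageExample {
    static void example() {
        final Registry registry = new com.netflix.spectator.api.DefaultRegistry();
        final HiveConnectorFastServiceMetric metric = new HiveConnectorFastServiceMetric(registry);
        final long start = System.currentTimeMillis();
        // ... the actual metastore call would run here ...
        metric.recordTimer(HiveMetrics.TagGetPartitions.getMetricName(),
            System.currentTimeMillis() - start);
    }
}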
| 1,558 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveTableUtil.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.common.server.util.MetacatUtils;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.iceberg.catalog.TableIdentifier;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* HiveTableUtil.
*
* @author zhenl
* @since 1.0.0
*/
@SuppressWarnings("deprecation")
@Slf4j
public final class HiveTableUtil {
private static final String PARQUET_HIVE_SERDE = "parquet.hive.serde.ParquetHiveSerDe";
private HiveTableUtil() {
}
/**
* getTableStructFields.
*
* @param table table
* @return all struct field refs
*/
public static List<? extends StructField> getTableStructFields(final Table table) {
final Properties schema = MetaStoreUtils.getTableMetadata(table);
final String name = schema.getProperty(serdeConstants.SERIALIZATION_LIB);
if (name == null) {
return Collections.emptyList();
}
final Deserializer deserializer = createDeserializer(getDeserializerClass(name));
try {
deserializer.initialize(new Configuration(false), schema);
} catch (SerDeException e) {
throw new RuntimeException("error initializing deserializer: " + deserializer.getClass().getName());
}
try {
final ObjectInspector inspector = deserializer.getObjectInspector();
Preconditions.checkArgument(inspector.getCategory() == ObjectInspector.Category.STRUCT,
"expected STRUCT: %s", inspector.getCategory());
return ((StructObjectInspector) inspector).getAllStructFieldRefs();
} catch (SerDeException e) {
throw Throwables.propagate(e);
}
}
private static Class<? extends Deserializer> getDeserializerClass(final String name) {
// CDH uses different names for Parquet
if (PARQUET_HIVE_SERDE.equals(name)) {
return ParquetHiveSerDe.class;
}
try {
return Class.forName(name, true, JavaUtils.getClassLoader()).asSubclass(Deserializer.class);
        } catch (ClassNotFoundException e) {
            throw new RuntimeException("deserializer does not exist: " + name, e);
        } catch (ClassCastException e) {
            throw new RuntimeException("invalid deserializer class: " + name, e);
        }
}
private static Deserializer createDeserializer(final Class<? extends Deserializer> clazz) {
try {
return clazz.getConstructor().newInstance();
} catch (ReflectiveOperationException e) {
throw new RuntimeException("error creating deserializer: " + clazz.getName(), e);
}
}
/**
* check if the table is an Iceberg Table.
*
* @param tableInfo table info
* @return true for iceberg table
*/
public static boolean isIcebergTable(final TableInfo tableInfo) {
final String tableTypeVal = getTableType(tableInfo);
return DirectSqlTable.ICEBERG_TABLE_TYPE.equalsIgnoreCase(tableTypeVal);
}
private static String getTableType(final TableInfo tableInfo) {
final QualifiedName tableName = tableInfo.getName();
final String fallbackTableType = "unknown";
final MetacatRequestContext context = MetacatContextManager.getContext();
final Map<String, String> metadata = tableInfo.getMetadata();
if (metadata == null) {
context.updateTableTypeMap(tableName, fallbackTableType);
return null;
}
String tableType = metadata.get(DirectSqlTable.PARAM_TABLE_TYPE);
if (StringUtils.isBlank(tableType)) {
tableType = fallbackTableType;
}
context.updateTableTypeMap(tableName, tableType);
return tableType;
}
/**
* get iceberg table metadata location.
*
* @param tableInfo table info
* @return iceberg table metadata location
*/
public static String getIcebergTableMetadataLocation(final TableInfo tableInfo) {
return tableInfo.getMetadata().get(DirectSqlTable.PARAM_METADATA_LOCATION);
}
/**
* Convert qualified name to table identifier.
*
* @param name qualified name
* @return table identifier
*/
public static TableIdentifier qualifiedNameToTableIdentifier(final QualifiedName name) {
return TableIdentifier.parse(name.toString().replace('/', '.'));
}
    /**
     * check if the table is a common view.
*
* @param tableInfo table info
* @return true for common view
*/
public static boolean isCommonView(final TableInfo tableInfo) {
return tableInfo != null && tableInfo.getMetadata() != null
&& MetacatUtils.isCommonView(tableInfo.getMetadata());
}
/**
* get common view metadata location.
*
* @param tableInfo table info
* @return common view metadata location
*/
public static String getCommonViewMetadataLocation(final TableInfo tableInfo) {
return tableInfo.getMetadata().get(DirectSqlTable.PARAM_METADATA_LOCATION);
}
/**
* Throws an invalid meta exception
* if the metadata for a table is null or empty.
*
* @param tableName the table name.
* @param metadata the table metadata.
*/
public static void throwIfTableMetadataNullOrEmpty(final QualifiedName tableName,
final Map<String, String> metadata) {
if (metadata == null || metadata.isEmpty()) {
final String message = String.format("No parameters defined for iceberg table %s", tableName);
log.warn(message);
throw new InvalidMetaException(tableName, message, null);
}
}
}
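// Illustrative sketch, not part of the original file: a table is treated as
// Iceberg when its metadata carries table_type=ICEBERG, compared
// case-insensitively against DirectSqlTable.ICEBERG_TABLE_TYPE. The qualified
// name below is hypothetical.
class HiveTableUtilUsageExample {
    static boolean example() {
        final TableInfo info = TableInfo.builder()
            .name(QualifiedName.ofTable("prodhive", "example_db", "example_table"))
            .metadata(Collections.singletonMap(
                DirectSqlTable.PARAM_TABLE_TYPE, DirectSqlTable.ICEBERG_TABLE_TYPE))
            .build();
        return HiveTableUtil.isIcebergTable(info); // true
    }
}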
| 1,559 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HivePartitionKeyParserEval.java
|
package com.netflix.metacat.connector.hive.util;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import org.apache.hadoop.hive.common.FileUtils;
/**
* Hive partition key evaluation.
*/
public class HivePartitionKeyParserEval extends PartitionKeyParserEval {
@Override
protected String toValue(final Object value) {
return value == null ? PartitionUtil.DEFAULT_PARTITION_NAME
: FileUtils.escapePathName(value.toString());
}
}
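// Illustrative sketch, not part of the original file: a null value maps to the
// Hive default-partition marker, and any other value is path-escaped, so
// path-unsafe characters such as '/' are percent-encoded by
// FileUtils.escapePathName before appearing in a partition name.
class HivePartitionKeyParserEvalUsageExample {
    static String example() {
        // The protected toValue is reachable because the example shares the package.
        return new HivePartitionKeyParserEval().toValue("2017/01");
    }
}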
| 1,560 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/IcebergFilterGenerator.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.types.Types;
import com.netflix.metacat.common.server.partition.parser.ASTAND;
import com.netflix.metacat.common.server.partition.parser.ASTBETWEEN;
import com.netflix.metacat.common.server.partition.parser.ASTCOMPARE;
import com.netflix.metacat.common.server.partition.parser.ASTIN;
import com.netflix.metacat.common.server.partition.parser.ASTLIKE;
import com.netflix.metacat.common.server.partition.parser.ASTMATCHES;
import com.netflix.metacat.common.server.partition.parser.ASTNOT;
import com.netflix.metacat.common.server.partition.parser.ASTOR;
import com.netflix.metacat.common.server.partition.parser.ASTVAR;
import com.netflix.metacat.common.server.partition.parser.SimpleNode;
import com.netflix.metacat.common.server.partition.parser.Variable;
import com.netflix.metacat.common.server.partition.visitor.PartitionParserEval;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Iceberg Filter generator.
*/
public class IcebergFilterGenerator extends PartitionParserEval {
private static final Set<String> ICEBERG_TIMESTAMP_NAMES
= ImmutableSet.of("dateCreated", "lastUpdated");
private final Map<String, Types.NestedField> fieldMap;
/**
* Constructor.
*
* @param fields partition fields
*/
public IcebergFilterGenerator(final List<Types.NestedField> fields) {
fieldMap = Maps.newHashMap();
for (final Types.NestedField field : fields) {
fieldMap.put(field.name(), field);
}
}
@Override
public Object visit(final ASTAND node, final Object data) {
return Expressions.and((Expression) node.jjtGetChild(0).jjtAccept(this, data),
(Expression) node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTOR node, final Object data) {
return Expressions.or((Expression) node.jjtGetChild(0).jjtAccept(this, data),
(Expression) node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTCOMPARE node, final Object data) {
if (node.jjtGetNumChildren() == 1) {
            return evalSingleTerm(node, data);
} else {
return evalString(node, data);
}
}
@Override
public Object visit(final ASTVAR node, final Object data) {
return ((Variable) node.jjtGetValue()).getName();
}
@Override
public Object visit(final ASTBETWEEN node, final Object data) {
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
final Object startValue = node.jjtGetChild(1).jjtAccept(this, data);
final Object endValue = node.jjtGetChild(2).jjtAccept(this, data);
final Expression compare1 =
createIcebergExpression(value, startValue, node.not ? Compare.LT : Compare.GTE);
final Expression compare2 =
createIcebergExpression(value, endValue, node.not ? Compare.GT : Compare.LTE);
return (node.not)
? Expressions.or(compare1, compare2) : Expressions.and(compare1, compare2);
}
@Override
public Object visit(final ASTIN node, final Object data) {
throw new UnsupportedOperationException("IN Operator not supported");
}
@Override
public Object visit(final ASTMATCHES node, final Object data) {
throw new UnsupportedOperationException("Operator Not supported");
}
@Override
public Object visit(final ASTNOT node, final Object data) {
throw new UnsupportedOperationException("Operator Not supported");
}
@Override
public Object visit(final ASTLIKE node, final Object data) {
throw new UnsupportedOperationException("Not supported");
}
private Expression evalSingleTerm(final ASTCOMPARE node, final Object data) {
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
if (value != null) {
return Boolean.parseBoolean(value.toString())
? Expressions.alwaysTrue() : Expressions.alwaysFalse();
}
return Expressions.alwaysFalse();
}
/**
* evalString.
*
* @param node node
* @param data data
* @return eval String
*/
private Expression evalString(final SimpleNode node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data);
final Object rhs = node.jjtGetChild(2).jjtAccept(this, data);
return createIcebergExpression(lhs, rhs, comparison);
}
/**
* Check if the key is part of field.
*
* @param key input string
* @return True if key is a field.
*/
private boolean isField(final Object key) {
return (key instanceof String) && fieldMap.containsKey(((String) key).toLowerCase());
}
/**
* Check if the key is an iceberg supported date filter field.
*
* @param key input string
* @return True if key is an iceberg supported date filter field.
*/
private boolean isIcebergTimestamp(final Object key) {
return (key instanceof String) && ICEBERG_TIMESTAMP_NAMES.contains(key);
}
/**
* Get the key and value field of iceberg expression.
*
* @param lhs left hand string
* @param rhs right hand string
* @return key value pair for iceberg expression.
*/
private Pair<String, Object> getExpressionKeyValue(final Object lhs,
final Object rhs) {
if (isIcebergTimestamp(lhs)) {
return new ImmutablePair<>(lhs.toString(), ((BigDecimal) rhs).longValue());
} else if (isIcebergTimestamp(rhs)) {
return new ImmutablePair<>(rhs.toString(), ((BigDecimal) lhs).longValue());
}
if (isField(lhs)) {
return new ImmutablePair<>(lhs.toString(), getValue(lhs.toString(), rhs));
} else if (isField(rhs)) {
return new ImmutablePair<>(rhs.toString(), getValue(rhs.toString(), lhs));
}
throw new IllegalArgumentException(
String.format("Invalid input \"%s/%s\" filter must be columns in fields %s or %s",
lhs, rhs, fieldMap.keySet().toString(), ICEBERG_TIMESTAMP_NAMES.toString()));
}
/**
* Transform the value type to iceberg type.
*
* @param key the input filter key
* @param value the input filter value
* @return iceberg type
*/
private Object getValue(final String key, final Object value) {
if (value instanceof BigDecimal) {
switch (fieldMap.get(key).type().typeId()) {
case LONG:
return ((BigDecimal) value).longValue();
case INTEGER:
return ((BigDecimal) value).intValue();
case DOUBLE:
return ((BigDecimal) value).doubleValue();
case FLOAT:
return ((BigDecimal) value).floatValue();
case DECIMAL:
return value;
default:
throw new IllegalArgumentException("Cannot convert the given big decimal value to an Iceberg type");
}
}
return value;
}
/**
* Based on filter create iceberg expression.
*
* @param lhs left hand string
* @param rhs right hand string
* @param comparison comparing operator
* @return iceberg expression
*/
private Expression createIcebergExpression(final Object lhs,
final Object rhs,
final Compare comparison) {
final Pair<String, Object> keyValue = getExpressionKeyValue(lhs, rhs);
final String key = keyValue.getLeft();
final Object value = keyValue.getRight();
switch (comparison) {
case EQ:
return Expressions.equal(key, value);
case LTE:
return Expressions.lessThanOrEqual(key, value);
case GTE:
return Expressions.greaterThanOrEqual(key, value);
case GT:
return Expressions.greaterThan(key, value);
case LT:
return Expressions.lessThan(key, value);
case NEQ:
return Expressions.notEqual(key, value);
default:
                throw new UnsupportedOperationException(String.format("Operator %s not supported", comparison));
}
}
}
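// Illustrative sketch, not part of the original file: a parsed filter such as
// "dateint >= 20170101 and dateint < 20170201" reduces, through the visitor
// above, to the same Iceberg expression as the one built directly here
// (assuming dateint is a bigint partition column; the names are hypothetical).
class IcebergFilterGeneratorUsageExample {
    static Expression example() {
        return Expressions.and(
            Expressions.greaterThanOrEqual("dateint", 20170101L),
            Expressions.lessThan("dateint", 20170201L));
    }
}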
| 1,561 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveFilterPartition.java
|
package com.netflix.metacat.connector.hive.util;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import org.apache.hadoop.hive.common.FileUtils;
import java.util.Map;
/**
* Filter partition for hive.
*
* @author amajumdar
*/
public class HiveFilterPartition extends FilterPartition {
@Override
protected void addNameValues(final String name, final Map<String, String> values) {
super.addNameValues(name, values);
values.replaceAll((key, value) -> value == null ? null : FileUtils.unescapePathName(value));
}
}
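// Illustrative note, not part of the original file: after the base class
// splits a partition name into key/value pairs, each value is unescaped, so a
// stored value such as "2017%2F01" is evaluated by the filter as "2017/01".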
| 1,562 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/PartitionFilterGenerator.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.util;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.partition.parser.ASTAND;
import com.netflix.metacat.common.server.partition.parser.ASTBETWEEN;
import com.netflix.metacat.common.server.partition.parser.ASTCOMPARE;
import com.netflix.metacat.common.server.partition.parser.ASTIN;
import com.netflix.metacat.common.server.partition.parser.ASTLIKE;
import com.netflix.metacat.common.server.partition.parser.ASTMATCHES;
import com.netflix.metacat.common.server.partition.parser.ASTNOT;
import com.netflix.metacat.common.server.partition.parser.ASTNULL;
import com.netflix.metacat.common.server.partition.parser.ASTOR;
import com.netflix.metacat.common.server.partition.parser.ASTVAR;
import com.netflix.metacat.common.server.partition.parser.SimpleNode;
import com.netflix.metacat.common.server.partition.parser.Variable;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.common.server.partition.visitor.PartitionParserEval;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.serde.serdeConstants;
import java.math.BigDecimal;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.Map;
/**
* PartitionFilterGenerator.
*
* @author zhenl
* @since 1.0.0
*/
public class PartitionFilterGenerator extends PartitionParserEval {
private final Map<String, PartitionCol> partitionColumns;
private final List<Object> params;
private List<String> partVals;
private boolean optimized;
private final boolean escapePartitionNameOnFilter;
/**
* Constructor.
*
* @param partitionsKeys partition keys
* @param escapePartitionNameOnFilter if true, escape the partition name
*/
public PartitionFilterGenerator(final List<FieldSchema> partitionsKeys, final boolean escapePartitionNameOnFilter) {
partitionColumns = Maps.newHashMap();
this.partVals = Lists.newArrayListWithCapacity(partitionsKeys.size());
for (int index = 0; index < partitionsKeys.size(); index++) {
final FieldSchema partitionKey = partitionsKeys.get(index);
partitionColumns.put(partitionKey.getName().toLowerCase(), new PartitionCol(index, partitionKey.getType()));
this.partVals.add(null);
}
this.params = Lists.newArrayList();
this.optimized = true;
this.escapePartitionNameOnFilter = escapePartitionNameOnFilter;
}
/**
* evalString.
*
* @param node node
* @param data data
* @return eval String
*/
public String evalString(final SimpleNode node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data);
final Object rhs = node.jjtGetChild(2).jjtAccept(this, data);
return createSqlCriteria(lhs, rhs, comparison, false);
}
private String createSqlCriteria(final Object lhs, final Object rhs, final Compare comparison, final boolean not) {
String key = null;
Object value = null;
boolean isKeyLhs = true;
//
// lhs, rhs or both can be keys
//
if (lhs instanceof String && isKey((String) lhs)) {
key = lhs.toString();
value = rhs;
} else if (rhs instanceof String && isKey((String) rhs)) {
key = rhs.toString();
value = lhs;
isKeyLhs = false;
}
if (key == null || value == null) {
throw new RuntimeException("Invalid expression key/value " + lhs + "/" + rhs);
}
final PartitionCol partCol = partitionColumns.get(key.toLowerCase());
final String valueStr = value.toString();
final String operator = not ? "not " + comparison.getExpression() : comparison.getExpression();
if (partCol != null && valueStr != null && (partitionColumns.containsKey(valueStr.toLowerCase()))) {
// Key part column
partCol.occurred();
final FilterType colType = partCol.type;
optimized = false;
// Value part column
final PartitionCol valuePartCol = partitionColumns.get(valueStr);
valuePartCol.occurred();
final FilterType valueColType = valuePartCol.type;
if (colType != valueColType) {
throw new RuntimeException(
String.format("Invalid column comparison with key as %s and"
+ " value as %s", colType, valueColType));
}
return String.format("%s %s %s", getSQLExpression(partCol), operator, getSQLExpression(valuePartCol));
} else if (partCol != null) {
partCol.occurred();
// For more optimization
if (partCol.hasOccurredOnlyOnce() && Compare.EQ.equals(comparison)) {
partVals.set(partCol.index, key + "="
+ (escapePartitionNameOnFilter ? FileUtils.escapePathName(valueStr) : valueStr));
} else {
optimized = false;
}
final FilterType colType = partCol.type;
if (colType == FilterType.Invalid) {
throw new RuntimeException("Invalid type " + colType);
}
FilterType valType = FilterType.fromClass(value);
if (valType == FilterType.Invalid) {
throw new RuntimeException("Invalid value " + value.getClass());
}
if (colType == FilterType.Date && valType == FilterType.String) {
try {
value = new java.sql.Date(
HiveMetaStore.PARTITION_DATE_FORMAT.get().parse((String) value).getTime());
valType = FilterType.Date;
} catch (ParseException pe) { // do nothing, handled below - types will mismatch
}
} else if (colType == FilterType.Timestamp && valType == FilterType.String) {
try {
value = new java.sql.Timestamp(
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse((String) value).getTime());
valType = FilterType.Timestamp;
} catch (ParseException pe) { // do nothing, handled below - types will mismatch
}
} else if (colType == FilterType.Integral && valType == FilterType.String) {
try {
value = new BigDecimal((String) value);
valType = FilterType.Integral;
} catch (NumberFormatException pe) { // do nothing, handled below - types will mismatch
}
} else if (colType == FilterType.String && valType != FilterType.String) {
value = value.toString();
valType = FilterType.String;
}
if (colType != valType) {
throw new RuntimeException("Invalid value " + value.getClass());
}
key = getSQLExpression(partCol);
params.add(value);
} else if ("batchid".equalsIgnoreCase(key)) {
return "1=1";
} else if ("dateCreated".equalsIgnoreCase(key)) {
optimized = false;
key = "p.CREATE_TIME";
params.add(value);
} else {
throw new RuntimeException("Invalid expression key " + key);
}
return isKeyLhs ? String.format("%s %s %s", key, operator, "?")
: String.format("%s %s %s", "?", operator, key);
}
private String getSQLExpression(final PartitionCol partCol) {
String result = "pv" + partCol.index + ".part_key_val";
if (partCol.type != FilterType.String) {
if (partCol.type == FilterType.Integral) {
result = "cast(" + result + " as decimal(21,0))";
} else if (partCol.type == FilterType.Date) {
result = "cast(" + result + " as date)";
} else if (partCol.type == FilterType.Timestamp) {
result = "cast(" + result + " as timestamp)";
}
}
return result;
}
private boolean isKey(final String key) {
return partitionColumns.containsKey(
key.toLowerCase()) || "batchid".equalsIgnoreCase(key) || "dateCreated".equalsIgnoreCase(key);
}
public List<Object> getParams() {
return params;
}
/**
     * joinSql.
*
* @return joined sql
*/
public String joinSql() {
final StringBuilder result = new StringBuilder();
if (!isOptimized()) {
partitionColumns.values().forEach(partCol -> {
if (partCol.hasOccurred()) {
final String tableAlias = "pv" + partCol.index;
result.append(" join PARTITION_KEY_VALS as ").append(tableAlias)
.append(" on p.part_id=").append(tableAlias).append(".part_id and ")
.append(tableAlias).append(".integer_idx=").append(partCol.index);
}
});
}
return result.toString();
}
public boolean isOptimized() {
return optimized;
}
/**
* getOptimizedSql.
*
* @return get Optimized Sql
*/
public String getOptimizedSql() {
final StringBuilder result = new StringBuilder();
boolean likeExpression = false;
boolean emptyPartVals = true;
if (isOptimized()) {
for (int i = 0; i < partVals.size(); i++) {
final String partVal = partVals.get(i);
if (partVal == null) {
likeExpression = true;
result.append("%");
} else {
emptyPartVals = false;
result.append(partVal);
if (i + 1 != partVals.size()) {
result.append("/");
}
}
}
}
if (emptyPartVals) {
return result.toString();
} else if (likeExpression) {
params.clear();
params.add(result.toString());
return "p.part_name like ?";
} else {
params.clear();
params.add(result.toString());
return "p.part_name = ?";
}
}
@Override
public Object visit(final ASTAND node, final Object data) {
return String.format("(%s %s %s)", node.jjtGetChild(0).jjtAccept(this, data), "and",
node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTOR node, final Object data) {
optimized = false;
return String.format("(%s %s %s)", node.jjtGetChild(0).jjtAccept(this, data), "or",
node.jjtGetChild(1).jjtAccept(this, data));
}
@Override
public Object visit(final ASTCOMPARE node, final Object data) {
if (node.jjtGetNumChildren() == 1) {
return evalSingleTerm(node, data).toString();
} else {
return evalString(node, data);
}
}
private Boolean evalSingleTerm(final ASTCOMPARE node, final Object data) {
Boolean result = Boolean.FALSE;
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
if (value != null) {
result = Boolean.parseBoolean(value.toString());
}
return result;
}
@Override
public Object visit(final ASTBETWEEN node, final Object data) {
final Object value = node.jjtGetChild(0).jjtAccept(this, data);
final Object startValue = node.jjtGetChild(1).jjtAccept(this, data);
final Object endValue = node.jjtGetChild(2).jjtAccept(this, data);
final String compare1 = createSqlCriteria(value, startValue, node.not ? Compare.LT : Compare.GTE, false);
final String compare2 = createSqlCriteria(value, endValue, node.not ? Compare.GT : Compare.LTE, false);
return String.format("(%s %s %s)", compare1, node.not ? "or" : "and", compare2);
}
@Override
public Object visit(final ASTIN node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final StringBuilder builder = new StringBuilder();
for (int i = 1; i < node.jjtGetNumChildren(); i++) {
final Object inValue = node.jjtGetChild(i).jjtAccept(this, data);
if (i != 1) {
builder.append(",");
}
if (inValue instanceof String) {
builder.append("'").append(inValue).append("'");
} else {
builder.append(inValue);
}
}
final PartitionCol partCol = partitionColumns.get(lhs.toString().toLowerCase());
if (partCol != null) {
partCol.occurred();
optimized = false;
final String operator = node.not ? "not in" : "in";
return String.format("%s %s (%s)", getSQLExpression(partCol), operator, builder.toString());
} else {
throw new RuntimeException("Invalid expression key " + lhs);
}
}
@Override
public Object visit(final ASTLIKE node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
final Object rhs = node.jjtGetChild(1).jjtAccept(this, data);
return createSqlCriteria(lhs, rhs, Compare.LIKE, node.not);
}
@Override
public Object visit(final ASTNULL node, final Object data) {
final Object lhs = node.jjtGetChild(0).jjtAccept(this, data);
return createSqlCriteria(lhs, PartitionUtil.DEFAULT_PARTITION_NAME, Compare.EQ, node.not);
}
@Override
public Object visit(final ASTVAR node, final Object data) {
return ((Variable) node.jjtGetValue()).getName();
}
@Override
public Object visit(final ASTMATCHES node, final Object data) {
throw new RuntimeException("Not supported");
}
@Override
public Object visit(final ASTNOT node, final Object data) {
throw new RuntimeException("Not supported");
}
private enum FilterType {
Integral,
String,
Date,
Timestamp,
Invalid;
static FilterType fromType(final String colTypeStr) {
if (colTypeStr.equals(serdeConstants.STRING_TYPE_NAME)) {
return FilterType.String;
} else if (colTypeStr.equals(serdeConstants.DATE_TYPE_NAME)) {
return FilterType.Date;
} else if (colTypeStr.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
return FilterType.Timestamp;
} else if (serdeConstants.IntegralTypes.contains(colTypeStr)) {
return FilterType.Integral;
}
return FilterType.Invalid;
}
public static FilterType fromClass(final Object value) {
if (value instanceof String) {
return FilterType.String;
} else if (value instanceof Number) {
return FilterType.Integral;
} else if (value instanceof java.sql.Date) {
return FilterType.Date;
} else if (value instanceof java.sql.Timestamp) {
return FilterType.Timestamp;
}
return FilterType.Invalid;
}
}
static class PartitionCol {
private int index;
private FilterType type;
private int occurrences;
PartitionCol(final int index, final String type) {
this.index = index;
this.type = FilterType.fromType(type);
}
void occurred() {
occurrences++;
}
boolean hasOccurred() {
return occurrences > 0;
}
boolean hasOccurredOnlyOnce() {
return occurrences == 1;
}
}
}
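// Minimal sketch, not part of the original file. Assumption: the filter AST is
// produced by com.netflix.metacat.common.server.partition.parser.PartitionParser,
// following its usage elsewhere in Metacat; that entry point is not defined in
// this file. A single equality predicate on the only partition key stays
// optimized and compiles to a part_name lookup, with getParams() then holding
// the single bound value "dateint=20170101".
class PartitionFilterGeneratorUsageExample {
    static String example() throws Exception {
        final PartitionFilterGenerator generator = new PartitionFilterGenerator(
            java.util.Arrays.asList(new FieldSchema("dateint", "string", null)), true);
        new com.netflix.metacat.common.server.partition.parser.PartitionParser(
            new java.io.StringReader("dateint=='20170101'"))
            .filter().jjtAccept(generator, null);
        return generator.isOptimized() ? generator.getOptimizedSql() : null; // "p.part_name = ?"
    }
}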
| 1,563 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* com.netflix.metacat.connector.hive.util.
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.util;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,564 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/HiveConnectorInfoConverter.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.converters;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorInfoConverter;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.connectors.model.ViewInfo;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableWrapper;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import java.time.Instant;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Hive connector info converter.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveConnectorInfoConverter implements ConnectorInfoConverter<Database, Table, Partition> {
private static final Splitter SLASH_SPLITTER = Splitter.on('/');
private static final Splitter EQUAL_SPLITTER = Splitter.on('=').limit(2);
    private final HiveTypeConverter hiveTypeConverter;
/**
* Constructor.
*
     * @param hiveTypeConverter type converter
*/
public HiveConnectorInfoConverter(final HiveTypeConverter hiveTypeConverter) {
this.hiveTypeConverter = hiveTypeConverter;
}
/**
* Converts epoch time to Date.
*
* @param seconds time in seconds
* @return Date
*/
public static Date epochSecondsToDate(final long seconds) {
return Date.from(Instant.ofEpochSecond(seconds));
}
/**
     * Converts to DatabaseInfo.
*
* @param database connector database
* @return Metacat database Info
*/
@Override
public DatabaseInfo toDatabaseInfo(
final QualifiedName qualifiedName,
final Database database
) {
return DatabaseInfo.builder()
.name(qualifiedName)
.uri(database.getLocationUri())
.metadata(database.getParameters())
.build();
}
/**
     * Converts from DatabaseInfo to the connector database.
*
* @param databaseInfo Metacat database Info
* @return connector database
*/
@Override
public Database fromDatabaseInfo(final DatabaseInfo databaseInfo) {
final QualifiedName databaseName = databaseInfo.getName();
final String name = (databaseName == null) ? "" : databaseName.getDatabaseName();
        // This is a temporary hack to resolve the uri = null issue.
        // final String dbUri = Strings.isNullOrEmpty(databaseInfo.getUri()) ? "file://temp/" : databaseInfo.getUri();
        final Map<String, String> metadata
            = (databaseInfo.getMetadata() != null) ? databaseInfo.getMetadata() : Collections.emptyMap();
return new Database(name, name, databaseInfo.getUri(), metadata);
}
/**
     * Converts to TableInfo.
*
* @param table connector table
* @return Metacat table Info
*/
@Override
public TableInfo toTableInfo(final QualifiedName name, final Table table) {
final List<FieldSchema> nonPartitionColumns =
(table.getSd() != null) ? table.getSd().getCols() : Collections.emptyList();
        // Add the data fields to nonPartitionColumns; ignore all exceptions.
try {
if (nonPartitionColumns.isEmpty()) {
for (StructField field : HiveTableUtil.getTableStructFields(table)) {
final FieldSchema fieldSchema = new FieldSchema(field.getFieldName(),
field.getFieldObjectInspector().getTypeName(),
field.getFieldComment());
nonPartitionColumns.add(fieldSchema);
}
}
} catch (final Exception e) {
log.error(e.getMessage(), e);
}
final List<FieldSchema> partitionColumns = table.getPartitionKeys();
final Date creationDate = table.isSetCreateTime() ? epochSecondsToDate(table.getCreateTime()) : null;
final List<FieldInfo> allFields =
Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
nonPartitionColumns.stream()
.map(field -> hiveToMetacatField(field, false))
.forEachOrdered(allFields::add);
partitionColumns.stream()
.map(field -> hiveToMetacatField(field, true))
.forEachOrdered(allFields::add);
final AuditInfo auditInfo = AuditInfo.builder().createdDate(creationDate).build();
if (null != table.getTableType() && table.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
return TableInfo.builder()
.serde(toStorageInfo(table.getSd(), table.getOwner())).fields(allFields)
.metadata(table.getParameters()).name(name).auditInfo(auditInfo)
                .view(ViewInfo.builder()
                    .viewOriginalText(table.getViewOriginalText())
                    .viewExpandedText(table.getViewExpandedText())
                    .build())
                .build();
} else {
return TableInfo.builder()
.serde(toStorageInfo(table.getSd(), table.getOwner())).fields(allFields)
.metadata(table.getParameters()).name(name).auditInfo(auditInfo)
.build();
}
}
/**
* Converts IcebergTable to TableDto.
*
* @param name qualified name
* @param tableWrapper iceberg table wrapper containing the table info and extra properties
* @param tableLoc iceberg table metadata location
* @param tableInfo table info
* @return Metacat table Info
*/
public TableInfo fromIcebergTableToTableInfo(final QualifiedName name,
final IcebergTableWrapper tableWrapper,
final String tableLoc,
final TableInfo tableInfo) {
final org.apache.iceberg.Table table = tableWrapper.getTable();
final List<FieldInfo> allFields =
this.hiveTypeConverter.icebergeSchemaTofieldDtos(table.schema(), table.spec().fields());
final Map<String, String> tableParameters = new HashMap<>();
tableParameters.put(DirectSqlTable.PARAM_TABLE_TYPE, DirectSqlTable.ICEBERG_TABLE_TYPE);
tableParameters.put(DirectSqlTable.PARAM_METADATA_LOCATION, tableLoc);
tableParameters.put(DirectSqlTable.PARAM_PARTITION_SPEC, table.spec().toString());
//adding iceberg table properties
tableParameters.putAll(table.properties());
tableParameters.putAll(tableWrapper.getExtraProperties());
final StorageInfo.StorageInfoBuilder storageInfoBuilder = StorageInfo.builder();
if (tableInfo.getSerde() != null) {
// Adding the serde properties to support old engines.
storageInfoBuilder.inputFormat(tableInfo.getSerde().getInputFormat())
.outputFormat(tableInfo.getSerde().getOutputFormat())
.uri(tableInfo.getSerde().getUri())
.serializationLib(tableInfo.getSerde().getSerializationLib());
}
return TableInfo.builder().fields(allFields)
.metadata(tableParameters)
.serde(storageInfoBuilder.build())
.name(name).auditInfo(tableInfo.getAudit())
.build();
}
/**
     * Converts from TableInfo to the connector table.
*
* @param tableInfo Metacat table Info
* @return connector table
*/
@Override
public Table fromTableInfo(final TableInfo tableInfo) {
final QualifiedName name = tableInfo.getName();
final String tableName = (name != null) ? name.getTableName() : "";
final String databaseName = (name != null) ? name.getDatabaseName() : "";
final StorageInfo storageInfo = tableInfo.getSerde();
final String owner = (storageInfo != null && storageInfo.getOwner() != null)
? storageInfo.getOwner() : "";
final AuditInfo auditInfo = tableInfo.getAudit();
final int createTime = (auditInfo != null && auditInfo.getCreatedDate() != null)
? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
final Map<String, String> params = (tableInfo.getMetadata() != null)
? tableInfo.getMetadata() : new HashMap<>();
final List<FieldInfo> fields = tableInfo.getFields();
List<FieldSchema> partitionFields = Collections.emptyList();
List<FieldSchema> nonPartitionFields = Collections.emptyList();
if (fields != null) {
nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
partitionFields = Lists.newArrayListWithCapacity(fields.size());
for (FieldInfo fieldInfo : fields) {
if (fieldInfo.isPartitionKey()) {
partitionFields.add(metacatToHiveField(fieldInfo));
} else {
nonPartitionFields.add(metacatToHiveField(fieldInfo));
}
}
}
final StorageDescriptor sd = fromStorageInfo(storageInfo, nonPartitionFields);
final ViewInfo viewInfo = tableInfo.getView();
final String tableType = (null != viewInfo
&& !Strings.isNullOrEmpty(viewInfo.getViewOriginalText()))
? TableType.VIRTUAL_VIEW.name() : TableType.EXTERNAL_TABLE.name();
return new Table(tableName,
databaseName,
owner,
createTime,
0,
0,
sd,
partitionFields,
params,
tableType.equals(TableType.VIRTUAL_VIEW.name())
? tableInfo.getView().getViewOriginalText() : null,
tableType.equals(TableType.VIRTUAL_VIEW.name())
? tableInfo.getView().getViewExpandedText() : null,
tableType);
}
/**
     * Converts to PartitionInfo.
*
* @param partition connector partition
* @return Metacat partition Info
*/
@Override
public PartitionInfo toPartitionInfo(
final TableInfo tableInfo,
final Partition partition
) {
final QualifiedName tableName = tableInfo.getName();
final QualifiedName partitionName = QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(),
tableName.getTableName(),
getNameFromPartVals(tableInfo, partition.getValues()));
final String owner = notNull(tableInfo.getSerde()) ? tableInfo.getSerde().getOwner() : "";
final AuditInfo auditInfo = AuditInfo.builder()
.createdDate(epochSecondsToDate(partition.getCreateTime()))
.lastModifiedDate(epochSecondsToDate(partition.getLastAccessTime())).build();
return PartitionInfo.builder()
.serde(toStorageInfo(partition.getSd(), owner))
.name(partitionName)
.auditInfo(auditInfo)
.metadata(partition.getParameters())
.build();
}
/**
     * Converts from PartitionInfo to the connector partition.
*
* @param partition Metacat partition Info
* @return connector partition
*/
@Override
public Partition fromPartitionInfo(
final TableInfo tableInfo,
final PartitionInfo partition
) {
final QualifiedName name = partition.getName();
final List<String> values = Lists.newArrayListWithCapacity(16);
Map<String, String> metadata = partition.getMetadata();
if (metadata == null) {
metadata = new HashMap<>();
            // Can't use Collections.emptyMap(), which is immutable and can't be
            // modified when partitions are added in the embedded metastore.
}
final List<FieldInfo> fields = tableInfo.getFields();
List<FieldSchema> fieldSchemas = Collections.emptyList();
if (notNull(fields)) {
fieldSchemas = fields.stream()
.filter(field -> !field.isPartitionKey())
.map(this::metacatToHiveField)
.collect(Collectors.toList());
}
final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
//using the table level serialization lib
if (
notNull(sd.getSerdeInfo())
&& notNull(tableInfo.getSerde())
&& Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())
) {
sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
}
final AuditInfo auditInfo = partition.getAudit();
final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate()))
? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate()))
? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
if (null == name) {
return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
}
if (notNull(name.getPartitionName())) {
for (String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
Preconditions.checkState(nameValues.size() == 2,
"Unrecognized partition name: " + partition.getName());
values.add(nameValues.get(1));
}
}
final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
return new Partition(
values,
databaseName,
tableName,
createTime,
lastAccessTime,
sd,
metadata);
}
/**
* metacatToHiveField.
*
* @param fieldInfo fieldInfo
* @return FieldSchema
*/
public FieldSchema metacatToHiveField(final FieldInfo fieldInfo) {
final FieldSchema result = new FieldSchema();
result.setName(fieldInfo.getName());
if (StringUtils.isBlank(fieldInfo.getSourceType())) {
result.setType(hiveTypeConverter.fromMetacatType(fieldInfo.getType()));
} else {
result.setType(fieldInfo.getSourceType());
}
result.setComment(fieldInfo.getComment());
return result;
}
/**
* hiveToMetacatField.
*
* @param field field
* @param isPartitionKey boolean
* @return field info obj
*/
private FieldInfo hiveToMetacatField(final FieldSchema field, final boolean isPartitionKey) {
return FieldInfo.builder().name(field.getName())
.type(hiveTypeConverter.toMetacatType(field.getType()))
.sourceType(field.getType())
.comment(field.getComment())
.partitionKey(isPartitionKey)
.build();
}
private StorageInfo toStorageInfo(final StorageDescriptor sd, final String owner) {
if (sd == null) {
return new StorageInfo();
}
if (sd.getSerdeInfo() != null) {
return StorageInfo.builder().owner(owner)
.uri(sd.getLocation())
.inputFormat(sd.getInputFormat())
.outputFormat(sd.getOutputFormat())
.parameters(sd.getParameters())
.serializationLib(sd.getSerdeInfo().getSerializationLib())
.serdeInfoParameters(sd.getSerdeInfo().getParameters())
.build();
}
return StorageInfo.builder().owner(owner).uri(sd.getLocation()).inputFormat(sd.getInputFormat())
.outputFormat(sd.getOutputFormat()).parameters(sd.getParameters()).build();
}
@VisibleForTesting
Integer dateToEpochSeconds(final Date date) {
return null == date ? null : Math.toIntExact(date.toInstant().getEpochSecond());
}
private StorageDescriptor fromStorageInfo(final StorageInfo storageInfo, final List<FieldSchema> cols) {
if (storageInfo == null) {
return new StorageDescriptor(
Collections.emptyList(),
"",
null,
null,
false,
0,
new SerDeInfo("", null, new HashMap<>()),
Collections.emptyList(),
Collections.emptyList(),
new HashMap<>());
}
// Set all required fields to a non-null value
final String inputFormat = storageInfo.getInputFormat();
final String location = notNull(storageInfo.getUri()) ? storageInfo.getUri() : "";
final String outputFormat = storageInfo.getOutputFormat();
final Map<String, String> sdParams = notNull(storageInfo.getParameters())
? storageInfo.getParameters() : new HashMap<>();
final Map<String, String> serdeParams = notNull(storageInfo.getSerdeInfoParameters())
? storageInfo.getSerdeInfoParameters() : new HashMap<>();
final String serializationLib = storageInfo.getSerializationLib();
return new StorageDescriptor(
cols,
location,
inputFormat,
outputFormat,
false,
0,
new SerDeInfo("", serializationLib, serdeParams),
Collections.emptyList(),
Collections.emptyList(),
sdParams);
}
private String getNameFromPartVals(final TableInfo tableInfo, final List<String> partVals) {
final List<String> partitionKeys = getPartitionKeys(tableInfo.getFields());
if (partitionKeys.size() != partVals.size()) {
throw new IllegalArgumentException("Not the same number of partition columns and partition values");
}
final StringBuilder builder = new StringBuilder();
for (int i = 0; i < partitionKeys.size(); i++) {
if (builder.length() > 0) {
builder.append('/');
}
builder.append(partitionKeys.get(i))
.append('=')
.append(partVals.get(i));
}
return builder.toString();
}
private List<String> getPartitionKeys(final List<FieldInfo> fieldInfos) {
if (fieldInfos == null) {
return null;
} else if (fieldInfos.isEmpty()) {
return Collections.emptyList();
}
final List<String> keys = new LinkedList<>();
for (FieldInfo field : fieldInfos) {
if (field.isPartitionKey()) {
keys.add(field.getName());
}
}
return keys;
}
private boolean notNull(final Object object) {
return null != object;
}
}
| 1,565 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/HiveTypeMapping.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.converters;
import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeEnum;
import com.netflix.metacat.common.type.VarbinaryType;
import lombok.Getter;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import java.util.Map;
/**
* Hive type mapping.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveTypeMapping {
@Getter
private static final Map<Type, String> CANONICAL_TO_HIVE = ImmutableMap.<Type, String>builder()
.put(BaseType.TINYINT, serdeConstants.TINYINT_TYPE_NAME)
.put(BaseType.SMALLINT, serdeConstants.SMALLINT_TYPE_NAME)
.put(BaseType.INT, serdeConstants.INT_TYPE_NAME)
.put(BaseType.BIGINT, serdeConstants.BIGINT_TYPE_NAME)
.put(BaseType.FLOAT, serdeConstants.FLOAT_TYPE_NAME)
.put(BaseType.DOUBLE, serdeConstants.DOUBLE_TYPE_NAME)
.put(BaseType.BOOLEAN, serdeConstants.BOOLEAN_TYPE_NAME)
.put(BaseType.STRING, serdeConstants.STRING_TYPE_NAME)
.put(VarbinaryType.VARBINARY, serdeConstants.BINARY_TYPE_NAME)
.put(BaseType.DATE, serdeConstants.DATE_TYPE_NAME)
.put(BaseType.TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME)
.build();
@Getter
private static final Map<String, Type> HIVE_TO_CANONICAL = ImmutableMap.<String, Type>builder()
.put(PrimitiveObjectInspector.PrimitiveCategory.BOOLEAN.name(), BaseType.BOOLEAN)
.put(PrimitiveObjectInspector.PrimitiveCategory.BYTE.name(), BaseType.TINYINT)
.put(PrimitiveObjectInspector.PrimitiveCategory.SHORT.name(), BaseType.SMALLINT)
.put(PrimitiveObjectInspector.PrimitiveCategory.INT.name(), BaseType.INT)
.put(PrimitiveObjectInspector.PrimitiveCategory.LONG.name(), BaseType.BIGINT)
.put(PrimitiveObjectInspector.PrimitiveCategory.FLOAT.name(), BaseType.FLOAT)
.put(PrimitiveObjectInspector.PrimitiveCategory.DOUBLE.name(), BaseType.DOUBLE)
.put(PrimitiveObjectInspector.PrimitiveCategory.DATE.name(), BaseType.DATE)
.put(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP.name(), BaseType.TIMESTAMP)
.put(PrimitiveObjectInspector.PrimitiveCategory.BINARY.name(), VarbinaryType.VARBINARY)
.put(PrimitiveObjectInspector.PrimitiveCategory.VOID.name(), VarbinaryType.VARBINARY)
.put(PrimitiveObjectInspector.PrimitiveCategory.STRING.name(), BaseType.STRING)
.put(TypeEnum.DATE.getType(), BaseType.DATE)
.build();
}
| 1,566 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/package-info.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive connector converters.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.converters;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,567 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/converters/HiveTypeConverter.java
|
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.converters;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.metacat.common.server.connectors.ConnectorTypeConverter;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.type.BaseType;
import com.netflix.metacat.common.type.CharType;
import com.netflix.metacat.common.type.DecimalType;
import com.netflix.metacat.common.type.MapType;
import com.netflix.metacat.common.type.ParametricType;
import com.netflix.metacat.common.type.RowType;
import com.netflix.metacat.common.type.Type;
import com.netflix.metacat.common.type.TypeEnum;
import com.netflix.metacat.common.type.TypeRegistry;
import com.netflix.metacat.common.type.TypeSignature;
import com.netflix.metacat.common.type.TypeUtils;
import com.netflix.metacat.common.type.VarcharType;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* Class to convert hive to canonical type and vice versa.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveTypeConverter implements ConnectorTypeConverter {
// matches decimal declarations with only precision, ex: decimal(38)
// matches declarations with spaces around '(', the precision and ')'
private static final String DECIMAL_WITH_SCALE
= "decimal\\s*\\(\\s*[0-9]+\\s*\\)";
// matches decimal declarations with precision and scale, ex: decimal(38,9)
// matches declarations with spaces around '(', the precision, the scale, the comma and ')'
private static final String DECIMAL_WITH_SCALE_AND_PRECISION
= "decimal\\s*\\(\\s*[0-9]+\\s*,\\s*[0-9]*\\s*\\)";
// combined compiled pattern to match both
private static final Pattern DECIMAL_TYPE
= Pattern.compile(DECIMAL_WITH_SCALE + "|" + DECIMAL_WITH_SCALE_AND_PRECISION, Pattern.CASE_INSENSITIVE);
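// Illustrative examples (not in the original source) of strings the combined pattern matches:
// "decimal(38)", "decimal ( 38 )", "decimal(38,9)", "decimal( 38 , 9 )" and, since the pattern is
// case-insensitive, "DECIMAL(38, 9)".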
private static Type getPrimitiveType(final ObjectInspector fieldInspector) {
final PrimitiveCategory primitiveCategory = ((PrimitiveObjectInspector) fieldInspector)
.getPrimitiveCategory();
if (HiveTypeMapping.getHIVE_TO_CANONICAL().containsKey(primitiveCategory.name())) {
return HiveTypeMapping.getHIVE_TO_CANONICAL().get(primitiveCategory.name());
}
switch (primitiveCategory) {
case DECIMAL:
final DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) ((PrimitiveObjectInspector) fieldInspector)
.getTypeInfo();
return DecimalType.createDecimalType(decimalTypeInfo.precision(), decimalTypeInfo.getScale());
case CHAR:
final int cLength = ((CharTypeInfo) ((PrimitiveObjectInspector)
fieldInspector).getTypeInfo()).getLength();
return CharType.createCharType(cLength);
case VARCHAR:
final int vLength = ((VarcharTypeInfo) ((PrimitiveObjectInspector) fieldInspector)
.getTypeInfo()).getLength();
return VarcharType.createVarcharType(vLength);
default:
return null;
}
}
@Override
public Type toMetacatType(final String type) {
// Hack to fix the Presto "varchar" type coming in with no length, which Hive requires.
final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(sanitizeType(type));
ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
// The standard struct object inspector forces field names to lower case. In Metacat we need to preserve
// the original case of the struct fields, so we wrap the inspector with our own to force the field names
// to keep their original case.
if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
structTypeInfo.getAllStructFieldNames(), objectInspector);
}
return getCanonicalType(oi);
}
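// Illustrative example (not in the original source): toMetacatType("struct<Foo:int>") returns a
// ROW type whose literal field name stays "Foo"; without the wrapper above, the standard struct
// object inspector would lower-case it to "foo".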
/**
* Converts an iceberg schema to a list of field DTOs.
*
* @param schema the iceberg schema
* @param partitionFields the partition fields
* @return list of field info
*/
public List<FieldInfo> icebergeSchemaTofieldDtos(final Schema schema,
final List<PartitionField> partitionFields) {
final List<FieldInfo> fields = Lists.newArrayList();
final List<String> partitionNames =
partitionFields.stream()
.map(f -> schema.findField(f.sourceId()).name()).collect(Collectors.toList());
for (Types.NestedField field : schema.columns()) {
final FieldInfo fieldInfo = new FieldInfo();
fieldInfo.setName(field.name());
final org.apache.iceberg.types.Type fieldType = field.type();
fieldInfo.setSourceType(fieldType.toString());
fieldInfo.setType(toMetacatType(fromIcebergToHiveType(fieldType)));
fieldInfo.setIsNullable(field.isOptional());
fieldInfo.setComment(field.doc());
fieldInfo.setPartitionKey(partitionNames.contains(field.name()));
fields.add(fieldInfo);
}
return fields;
}
/**
* convert iceberg to hive type.
* @param type iceberg type.
* @return hive type string.
*/
public static String fromIcebergToHiveType(final org.apache.iceberg.types.Type type) {
switch (type.typeId()) {
case BOOLEAN:
return serdeConstants.BOOLEAN_TYPE_NAME;
case INTEGER:
return serdeConstants.INT_TYPE_NAME;
case LONG:
return serdeConstants.BIGINT_TYPE_NAME;
case FLOAT:
return serdeConstants.FLOAT_TYPE_NAME;
case DOUBLE:
return serdeConstants.DOUBLE_TYPE_NAME;
case DATE:
return serdeConstants.DATE_TYPE_NAME;
case TIME:
throw new UnsupportedOperationException("Hive does not support time fields");
case TIMESTAMP:
return serdeConstants.TIMESTAMP_TYPE_NAME;
case STRING:
case UUID:
return serdeConstants.STRING_TYPE_NAME;
case FIXED:
case BINARY:
return serdeConstants.BINARY_TYPE_NAME;
case DECIMAL:
final Types.DecimalType decimalType = (Types.DecimalType) type;
return String.format("decimal(%s,%s)", decimalType.precision(), decimalType.scale());
case STRUCT:
final Types.StructType structType = type.asStructType();
final String nameToType = structType.fields().stream()
.map(f -> String.format("%s:%s", f.name(), fromIcebergToHiveType(f.type())))
.collect(Collectors.joining(","));
return String.format("struct<%s>", nameToType);
case LIST:
final Types.ListType listType = type.asListType();
return String.format("array<%s>", fromIcebergToHiveType(listType.elementType()));
case MAP:
final Types.MapType mapType = type.asMapType();
return String.format("map<%s,%s>", fromIcebergToHiveType(mapType.keyType()),
fromIcebergToHiveType(mapType.valueType()));
default:
throw new UnsupportedOperationException(type + " is not supported");
}
}
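// Illustrative examples (not in the original source) of the conversion above:
// long -> "bigint", decimal(38,9) -> "decimal(38,9)", list<string> -> "array<string>",
// map<string,long> -> "map<string,bigint>", struct{id:int, name:string} -> "struct<id:int,name:string>".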
@Override
public String fromMetacatType(final Type type) {
if (HiveTypeMapping.getCANONICAL_TO_HIVE().containsKey(type)) {
return HiveTypeMapping.getCANONICAL_TO_HIVE().get(type);
}
if (type instanceof DecimalType || type instanceof CharType || type instanceof VarcharType) {
return type.getDisplayName();
} else if (type.getTypeSignature().getBase().equals(TypeEnum.MAP)) {
final MapType mapType = (MapType) type;
return "map<" + fromMetacatType(mapType.getKeyType())
+ "," + fromMetacatType(mapType.getValueType()) + ">";
} else if (type.getTypeSignature().getBase().equals(TypeEnum.ROW)) {
final RowType rowType = (RowType) type;
final String typeString = rowType.getFields()
.stream()
.map(this::rowFieldToString)
.collect(Collectors.joining(","));
return "struct<" + typeString + ">";
} else if (type.getTypeSignature().getBase().equals(TypeEnum.ARRAY)) {
final String typeString = ((ParametricType) type).getParameters().stream().map(this::fromMetacatType)
.collect(Collectors.joining(","));
return "array<" + typeString + ">";
}
return null;
}
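// Illustrative examples (not in the original source) of the conversion above:
// BaseType.STRING -> "string", a map of string to bigint -> "map<string,bigint>",
// a row with one string field "name" -> "struct<name:string>", an array of int -> "array<int>".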
private String rowFieldToString(final RowType.RowField rowField) {
String prefix = "";
if (rowField.getName() != null) {
prefix = rowField.getName() + ":";
}
return prefix + fromMetacatType(rowField.getType());
}
/**
* Sanitize the type to handle Hive type conversion edge cases.
*
* @param type the type to sanitize
* @return the sanitized type
*/
public static String sanitizeType(final String type) {
if ("varchar".equalsIgnoreCase(type)) {
return serdeConstants.STRING_TYPE_NAME;
} else {
// the current version of Hive (1.2.1) cannot handle spaces in column definitions
// this was fixed in 1.3.0. See: https://issues.apache.org/jira/browse/HIVE-11476
// this bug caused an error in loading the table information in Metacat
// see: https://netflix.slack.com/archives/G0SUNC804/p1676930065306799
// Here the offending column definition was decimal(38, 9)
// which had a space between the comma and the digit 9
// instead of upgrading the Hive version, we are making a targeted "fix"
// to handle this space in a decimal column declaration
// the regex we use tries to match various decimal declarations
// and handles decimal types inside other type declarations like array and struct
// see the unit test for those method for all the cases handled
final Matcher matcher = DECIMAL_TYPE.matcher(type);
final StringBuilder replacedType = new StringBuilder();
// keep track of the start of the substring that we haven't matched yet
// more explanation on how this is used is below
int prevStart = 0;
// we cannot simply use matcher.matches() and matcher.replaceAll()
// because that will replace the decimal declaration itself
// instead we use the region APIs (start() and end()) to find the substring that matched
// and then apply the replace function to remove spaces in the decimal declaration
// we do this for all the matches in the type declaration and hence the usage of the while loop
while (matcher.find()) {
// this index represents the start index (inclusive) of our current match
final int currMatchStart = matcher.start();
// this represents the end index (exclusive) of our current match
final int currMatchEnd = matcher.end();
replacedType
// first append any part of the string that did not match
// this is represented by the prevStart (inclusive) till the start of the current match (exclusive)
// this append should not need any replacement and can be added verbatim
.append(type, prevStart, currMatchStart)
// Then append the matching part which should be a decimal declaration
// The matching part is start (inclusive) and end (exclusive)
// This part should go through a replacement to remove spaces
.append(type.substring(currMatchStart, currMatchEnd).replaceAll("\\s", ""));
// update the prevStart marker so that for the next match
// we know where to start to add the non-matching part
prevStart = currMatchEnd;
}
// append any remaining part of the input type to the final answer
// again, no replacement necessary for this part since it should not contain any decimal declarations
// phew!
replacedType.append(type.substring(prevStart));
return replacedType.toString();
}
}
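// Illustrative examples (not in the original source) of sanitizeType, per the rules above:
// sanitizeType("varchar") -> "string"
// sanitizeType("decimal(38, 9)") -> "decimal(38,9)"
// sanitizeType("struct<a:decimal( 38 , 9 ),b:string>") -> "struct<a:decimal(38,9),b:string>"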
/**
* Returns the canonical type.
*
* @param fieldInspector inspector
* @return type
*/
Type getCanonicalType(final ObjectInspector fieldInspector) {
switch (fieldInspector.getCategory()) {
case PRIMITIVE:
return getPrimitiveType(fieldInspector);
case MAP:
final MapObjectInspector mapObjectInspector =
TypeUtils.checkType(fieldInspector, MapObjectInspector.class,
"fieldInspector");
final Type keyType = getCanonicalType(mapObjectInspector.getMapKeyObjectInspector());
final Type valueType = getCanonicalType(mapObjectInspector.getMapValueObjectInspector());
if (keyType == null || valueType == null) {
return null;
}
return TypeRegistry.getTypeRegistry().getParameterizedType(TypeEnum.MAP,
ImmutableList.of(keyType.getTypeSignature(), valueType.getTypeSignature()), ImmutableList.of());
case LIST:
final ListObjectInspector listObjectInspector =
TypeUtils.checkType(fieldInspector, ListObjectInspector.class,
"fieldInspector");
final Type elementType =
getCanonicalType(listObjectInspector.getListElementObjectInspector());
if (elementType == null) {
return null;
}
return TypeRegistry.getTypeRegistry().getParameterizedType(TypeEnum.ARRAY,
ImmutableList.of(elementType.getTypeSignature()), ImmutableList.of());
case STRUCT:
final StructObjectInspector structObjectInspector =
TypeUtils.checkType(fieldInspector, StructObjectInspector.class, "fieldInspector");
final List<TypeSignature> fieldTypes = new ArrayList<>();
final List<Object> fieldNames = new ArrayList<>();
for (StructField field : structObjectInspector.getAllStructFieldRefs()) {
fieldNames.add(field.getFieldName());
final Type fieldType = getCanonicalType(field.getFieldObjectInspector());
if (fieldType == null) {
return null;
}
fieldTypes.add(fieldType.getTypeSignature());
}
return TypeRegistry.getTypeRegistry()
.getParameterizedType(TypeEnum.ROW, fieldTypes, fieldNames);
default:
log.info("Currently unsupported type {}, returning Unknown type", fieldInspector.getTypeName());
return BaseType.UNKNOWN;
}
}
// This is protected and extends StandardStructObjectInspector so it can reference MyField
protected static class SameCaseStandardStructObjectInspector extends StandardStructObjectInspector {
private final List<String> realFieldNames;
private final StandardStructObjectInspector structObjectInspector;
public SameCaseStandardStructObjectInspector(final List<String> realFieldNames,
final StandardStructObjectInspector structObjectInspector) {
this.realFieldNames = realFieldNames;
this.structObjectInspector = structObjectInspector;
}
@Override
public List<? extends StructField> getAllStructFieldRefs() {
return structObjectInspector.getAllStructFieldRefs()
.stream()
.map(structField -> (MyField) structField)
.map(field -> new HiveTypeConverter.
SameCaseStandardStructObjectInspector.SameCaseMyField(field.getFieldID(),
realFieldNames.get(field.getFieldID()),
field.getFieldObjectInspector(), field.getFieldComment()))
.collect(Collectors.toList());
}
protected static class SameCaseMyField extends MyField {
public SameCaseMyField(final int fieldID, final String fieldName,
final ObjectInspector fieldObjectInspector,
final String fieldComment) {
super(fieldID, fieldName, fieldObjectInspector, fieldComment);
// Since super lower cases fieldName, this is to restore the original case
this.fieldName = fieldName;
}
}
}
}
| 1,568 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/DataMetadataMetricConstants.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
/**
* Data Metric Constants.
*
* @author zhenl
* @since 1.2.0
*/
public final class DataMetadataMetricConstants {
/**
* DATA_METADATA_METRIC_NAME.
*/
public static final String DATA_METADATA_METRIC_NAME = "metrics";
/**
* DATA_METADATA_VALUE.
*/
public static final String DATA_METADATA_VALUE = "value";
private DataMetadataMetricConstants() { }
}
| 1,569 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableOpsProxy.java
|
package com.netflix.metacat.connector.hive.iceberg;
import org.apache.iceberg.TableMetadata;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.Cacheable;
/**
* Proxy class to get the table metadata from the cache if it exists.
*/
@CacheConfig(cacheNames = "metacat")
public class IcebergTableOpsProxy {
/**
* Returns the table metadata from the cache if it exists; otherwise, makes the iceberg call to refresh it.
* @param icebergTableOps iceberg table operations
* @param useCache true, if table can be retrieved from cache
* @return TableMetadata
*/
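// Illustrative note (not in the original source): with a hypothetical metadata location of
// "s3://bucket/db/tbl/metadata/00001-abc.metadata.json", the SpEL key above resolves to
// "iceberg.s3://bucket/db/tbl/metadata/00001-abc.metadata.json", and caching applies only when
// useCache is true.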
@Cacheable(key = "'iceberg.' + #icebergTableOps.currentMetadataLocation()", condition = "#useCache")
public TableMetadata getMetadata(final IcebergTableOps icebergTableOps, final boolean useCache) {
return icebergTableOps.refresh();
}
}
| 1,570 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergMetastoreTables.java
|
package com.netflix.metacat.connector.hive.iceberg;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import org.apache.iceberg.BaseMetastoreCatalog;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import java.util.List;
import java.util.Map;
/**
* Implementation of BaseMetastoreCatalog to interact with the iceberg library.
* Loads an iceberg table from a location.
*/
public final class IcebergMetastoreTables extends BaseMetastoreCatalog {
private IcebergTableOps tableOperations;
IcebergMetastoreTables(final IcebergTableOps tableOperations) {
this.tableOperations = tableOperations;
}
@Override
public List<TableIdentifier> listTables(final Namespace namespace) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public String name() {
return "";
}
@Override
public Table createTable(final TableIdentifier identifier,
final Schema schema,
final PartitionSpec spec,
final String location,
final Map<String, String> properties) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public Transaction newCreateTableTransaction(final TableIdentifier identifier,
final Schema schema,
final PartitionSpec spec,
final String location,
final Map<String, String> properties) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public Transaction newReplaceTableTransaction(final TableIdentifier identifier,
final Schema schema,
final PartitionSpec spec,
final String location,
final Map<String, String> properties,
final boolean orCreate) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public Table loadTable(final TableIdentifier identifier) {
return super.loadTable(identifier);
}
@Override
protected TableOperations newTableOps(final TableIdentifier tableIdentifier) {
return getTableOps();
}
@Override
protected String defaultWarehouseLocation(final TableIdentifier tableIdentifier) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public boolean dropTable(final TableIdentifier identifier,
final boolean purge) {
throw new MetacatNotSupportedException("not supported");
}
@Override
public void renameTable(final TableIdentifier from,
final TableIdentifier to) {
throw new MetacatNotSupportedException("not supported");
}
/**
* Returns the IcebergTableOps instance used by this catalog.
*
* @return the IcebergTableOps for the table
*/
public IcebergTableOps getTableOps() {
return tableOperations;
}
}
| 1,571 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/DataMetadataMetrics.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import lombok.Getter;
/**
* Data Metrics.
*
* @author zhenl
* @since 1.2.0
*/
@Getter
public enum DataMetadataMetrics {
/**
* number of rows.
*/
rowCount("com.netflix.dse.mds.metric.RowCount"),
/**
* number of files.
*/
fileCount("com.netflix.dse.mds.metric.NumFiles");
private final String metricName;
DataMetadataMetrics(final String name) {
this.metricName = name;
}
@Override
public String toString() {
return metricName;
}
}
| 1,572 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableOps.java
|
package com.netflix.metacat.connector.hive.iceberg;
import com.google.common.base.Throwables;
import com.netflix.metacat.common.server.properties.Config;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.exceptions.NotFoundException;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.io.FileIO;
/**
* Implementation of BaseMetastoreTableOperations to interact with the iceberg library.
* Read-only operations.
*/
public class IcebergTableOps extends BaseMetastoreTableOperations {
private String location;
private String tableName;
private final Configuration conf;
private final Config config;
private final IcebergTableOpsProxy icebergTableOpsProxy;
private TableMetadata tableMetadata;
/**
* Constructor.
* @param conf hive configuration
* @param location table manifest location
* @param tableName table name
* @param config server config
* @param icebergTableOpsProxy IcebergTableOps proxy
*/
public IcebergTableOps(final Configuration conf,
final String location,
final String tableName,
final Config config,
final IcebergTableOpsProxy icebergTableOpsProxy) {
this.location = location;
this.tableName = tableName;
this.conf = conf;
this.config = config;
this.icebergTableOpsProxy = icebergTableOpsProxy;
}
@Override
protected String tableName() {
return tableName;
}
@Override
public TableMetadata current() {
if (tableMetadata == null) {
tableMetadata =
icebergTableOpsProxy.getMetadata(this, config.isIcebergTableMetadataCacheEnabled());
}
return tableMetadata;
}
@Override
public FileIO io() {
return new HadoopFileIO(conf);
}
@Override
public TableMetadata refresh() {
try {
refreshFromMetadataLocation(this.location, config.getIcebergRefreshFromMetadataLocationRetryNumber());
return super.current();
} catch (Exception e) {
for (Throwable ex : Throwables.getCausalChain(e)) {
if (ex.getMessage() != null && ex.getMessage().contains("NoSuchKey")) {
throw new NotFoundException(e, String.format("Location %s does not exist", location));
}
}
throw e;
}
}
@Override
public String currentMetadataLocation() {
return location;
}
@Override
public void commit(final TableMetadata base, final TableMetadata metadata) {
if (!base.equals(metadata)) {
location = writeNewMetadata(metadata, currentVersion() + 1);
tableMetadata = null;
this.requestRefresh();
}
}
}
| 1,573 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableCriteria.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.netflix.metacat.common.QualifiedName;
/**
* Iceberg Table Criteria.
*
* @author zhenl
* @since 1.2.0
*/
public interface IcebergTableCriteria {
/**
* Controls iceberg table operations in metacat. The criteria implementation throws an exception if
* the iceberg table doesn't satisfy the criteria, e.g. the manifest file doesn't exist or is too large.
*
* @param tableName qualified table name
* @param tableLocation table location.
*/
default void checkCriteria(final QualifiedName tableName, final String tableLocation) {
}
}
| 1,574 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableHandler.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.ConnectorUtils;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.FieldInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.parser.ParseException;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.util.HiveTableUtil;
import com.netflix.metacat.connector.hive.util.IcebergFilterGenerator;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.ScanSummary;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableMetadataParser;
import org.apache.iceberg.UpdateSchema;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.exceptions.NotFoundException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.types.Types;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.StringReader;
import java.time.Instant;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* Iceberg table handler which interacts with the iceberg library
* to perform iceberg table loading, querying, etc. The operations are limited to
* read-only for now.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class IcebergTableHandler {
private static final Retryer<Void> RETRY_ICEBERG_TABLE_UPDATE = RetryerBuilder.<Void>newBuilder()
.retryIfExceptionOfType(TablePreconditionFailedException.class)
.withStopStrategy(StopStrategies.stopAfterAttempt(3))
.build();
private final Configuration conf;
private final ConnectorContext connectorContext;
private final Registry registry;
@VisibleForTesting
private IcebergTableCriteria icebergTableCriteria;
@VisibleForTesting
private IcebergTableOpWrapper icebergTableOpWrapper;
private IcebergTableOpsProxy icebergTableOpsProxy;
/**
* Constructor.
*
* @param connectorContext connector context
* @param icebergTableCriteria iceberg table criteria
* @param icebergTableOpWrapper iceberg table operation
* @param icebergTableOpsProxy IcebergTableOps proxy
*/
public IcebergTableHandler(final ConnectorContext connectorContext,
final IcebergTableCriteria icebergTableCriteria,
final IcebergTableOpWrapper icebergTableOpWrapper,
final IcebergTableOpsProxy icebergTableOpsProxy) {
this.conf = new Configuration();
this.connectorContext = connectorContext;
this.registry = connectorContext.getRegistry();
connectorContext.getConfiguration().keySet()
.forEach(key -> conf.set(key, connectorContext.getConfiguration().get(key)));
this.icebergTableCriteria = icebergTableCriteria;
this.icebergTableOpWrapper = icebergTableOpWrapper;
this.icebergTableOpsProxy = icebergTableOpsProxy;
}
/**
* Returns the partitions for the given table and filter.
*
* @param tableInfo the table info
* @param context the request context
* @param filterExpression the filter expression
* @param partitionIds the partition ids to match
* @param sort the sort order
* @return the list of partitions
*/
public List<PartitionInfo> getPartitions(final TableInfo tableInfo,
final ConnectorContext context,
@Nullable final String filterExpression,
@Nullable final List<String> partitionIds,
@Nullable final Sort sort) {
final QualifiedName tableName = tableInfo.getName();
final org.apache.iceberg.Table icebergTable = getIcebergTable(tableName,
HiveTableUtil.getIcebergTableMetadataLocation(tableInfo), false).getTable();
final Map<String, ScanSummary.PartitionMetrics> partitionMap
= getIcebergTablePartitionMap(tableName, filterExpression, icebergTable);
final AuditInfo tableAuditInfo = tableInfo.getAudit();
final List<PartitionInfo> filteredPartitionList = partitionMap.keySet().stream()
.filter(partitionName -> partitionIds == null || partitionIds.contains(partitionName))
.map(partitionName ->
PartitionInfo.builder().name(
QualifiedName.ofPartition(tableName.getCatalogName(),
tableName.getDatabaseName(),
tableName.getTableName(),
partitionName)
).serde(StorageInfo.builder().uri(
getIcebergPartitionURI(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionName,
partitionMap.get(partitionName).dataTimestampMillis(),
context
)).build()
)
.dataMetrics(getDataMetadataFromIcebergMetrics(partitionMap.get(partitionName)))
.auditInfo(
AuditInfo.builder()
.createdBy(tableAuditInfo.getCreatedBy())
.createdDate(fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
.lastModifiedDate(
fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
.build()
).build()
)
.collect(Collectors.toList());
if (sort != null) {
if (sort.hasSort() && sort.getSortBy().equalsIgnoreCase(DirectSqlGetPartition.FIELD_DATE_CREATED)) {
final Comparator<PartitionInfo> dateCreatedComparator = Comparator.comparing(
p -> p.getAudit() != null ? p.getAudit().getCreatedDate() : null,
Comparator.nullsLast(Date::compareTo));
ConnectorUtils.sort(filteredPartitionList, sort, dateCreatedComparator);
} else {
// Sort using the partition name by default
final Comparator<PartitionInfo> nameComparator = Comparator.comparing(p -> p.getName().toString());
ConnectorUtils.sort(filteredPartitionList, sort, nameComparator);
}
}
return filteredPartitionList;
}
/**
* get Partition Map.
*
* @param tableName Qualified table name
* @param filterExpression the filter
* @param icebergTable iceberg Table
* @return partition map
*/
public Map<String, ScanSummary.PartitionMetrics> getIcebergTablePartitionMap(
final QualifiedName tableName,
@Nullable final String filterExpression,
final Table icebergTable) {
final long start = this.registry.clock().wallTime();
final Map<String, ScanSummary.PartitionMetrics> result;
try {
if (!Strings.isNullOrEmpty(filterExpression)) {
final IcebergFilterGenerator icebergFilterGenerator
= new IcebergFilterGenerator(icebergTable.schema().columns());
final Expression filter = (Expression) new PartitionParser(
new StringReader(filterExpression)).filter()
.jjtAccept(icebergFilterGenerator, null);
result = this.icebergTableOpWrapper.getPartitionMetricsMap(icebergTable, filter);
} else {
result = this.icebergTableOpWrapper.getPartitionMetricsMap(icebergTable, null);
}
} catch (ParseException ex) {
log.error("Iceberg filter parse error: ", ex);
throw new IllegalArgumentException(String.format("Iceberg filter parse error. Ex: %s", ex.getMessage()));
} catch (IllegalStateException e) {
registry.counter(registry.createId(IcebergRequestMetrics.CounterGetPartitionsExceedThresholdFailure
.getMetricName()).withTags(tableName.parts())).increment();
final String message =
String.format("Number of partitions queried for table %s exceeded the threshold %d",
tableName, connectorContext.getConfig().getMaxPartitionsThreshold());
log.warn(message);
throw new IllegalArgumentException(message);
} finally {
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to getIcebergTablePartitionMap {} is {} ms", tableName, duration);
this.recordTimer(
IcebergRequestMetrics.TagGetPartitionMap.getMetricName(), duration);
this.increaseCounter(
IcebergRequestMetrics.TagGetPartitionMap.getMetricName(), tableName);
}
return result;
}
/**
* get iceberg table.
*
* @param tableName table name
* @param tableMetadataLocation table metadata location
* @param includeInfoDetails if true, will include more details like the manifest file content
* @return iceberg table
*/
public IcebergTableWrapper getIcebergTable(final QualifiedName tableName, final String tableMetadataLocation,
final boolean includeInfoDetails) {
final long start = this.registry.clock().wallTime();
try {
this.icebergTableCriteria.checkCriteria(tableName, tableMetadataLocation);
log.debug("Loading icebergTable {} from {}", tableName, tableMetadataLocation);
final IcebergMetastoreTables icebergMetastoreTables = new IcebergMetastoreTables(
new IcebergTableOps(conf, tableMetadataLocation, tableName.getTableName(),
connectorContext.getConfig(),
icebergTableOpsProxy));
final Table table = icebergMetastoreTables.loadTable(
HiveTableUtil.qualifiedNameToTableIdentifier(tableName));
final Map<String, String> extraProperties = Maps.newHashMap();
if (includeInfoDetails) {
extraProperties.put(DirectSqlTable.PARAM_METADATA_CONTENT,
TableMetadataParser.toJson(icebergMetastoreTables.getTableOps().current()));
}
return new IcebergTableWrapper(table, extraProperties);
} catch (NotFoundException | NoSuchTableException e) {
throw new InvalidMetaException(tableName, e);
} finally {
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to getIcebergTable {} is {} ms", tableName, duration);
this.recordTimer(IcebergRequestMetrics.TagLoadTable.getMetricName(), duration);
this.increaseCounter(IcebergRequestMetrics.TagLoadTable.getMetricName(), tableName);
}
}
/**
* Updates the iceberg schema if the provided tableInfo has updated field comments.
*
* @param tableInfo table information
* @return true if an update is done
*/
public boolean update(final TableInfo tableInfo) {
boolean result = false;
final List<FieldInfo> fields = tableInfo.getFields();
if (fields != null && !fields.isEmpty()
// This parameter is only sent during data change and not during schema change.
&& Strings.isNullOrEmpty(tableInfo.getMetadata().get(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION))) {
final QualifiedName tableName = tableInfo.getName();
final String tableMetadataLocation = HiveTableUtil.getIcebergTableMetadataLocation(tableInfo);
if (Strings.isNullOrEmpty(tableMetadataLocation)) {
final String message = String.format("No metadata location specified for table %s", tableName);
log.error(message);
throw new MetacatBadRequestException(message);
}
final IcebergMetastoreTables icebergMetastoreTables = new IcebergMetastoreTables(
new IcebergTableOps(conf, tableMetadataLocation, tableName.getTableName(),
connectorContext.getConfig(),
icebergTableOpsProxy));
final Table table = icebergMetastoreTables.loadTable(
HiveTableUtil.qualifiedNameToTableIdentifier(tableName));
final UpdateSchema updateSchema = table.updateSchema();
final Schema schema = table.schema();
for (FieldInfo field : fields) {
final Types.NestedField iField = schema.findField(field.getName());
if (iField != null && !Objects.equals(field.getComment(), iField.doc())) {
updateSchema.updateColumnDoc(field.getName(), field.getComment());
result = true;
}
}
if (result) {
updateSchema.commit();
final String newTableMetadataLocation = icebergMetastoreTables.getTableOps().currentMetadataLocation();
if (!tableMetadataLocation.equalsIgnoreCase(newTableMetadataLocation)) {
tableInfo.getMetadata().put(DirectSqlTable.PARAM_PREVIOUS_METADATA_LOCATION, tableMetadataLocation);
tableInfo.getMetadata().put(DirectSqlTable.PARAM_METADATA_LOCATION, newTableMetadataLocation);
}
}
}
return result;
}
/**
* Handle iceberg table update operation.
*
* @param requestContext request context
* @param directSqlTable direct sql table object
* @param tableInfo table info
*/
public void handleUpdate(final ConnectorRequestContext requestContext,
final DirectSqlTable directSqlTable,
final TableInfo tableInfo) {
requestContext.setIgnoreErrorsAfterUpdate(true);
this.update(tableInfo);
// TODO: only trying once for correctness for now to fix a race condition that could lead to data loss
// but this needs more retries in case of schema updates for better user experience
directSqlTable.updateIcebergTable(tableInfo);
}
/**
* get data metadata from partition metrics.
*
* @param metrics metrics.
* @return object node of the metrics
*/
public ObjectNode getDataMetadataFromIcebergMetrics(
final ScanSummary.PartitionMetrics metrics) {
final ObjectNode root = JsonNodeFactory.instance.objectNode();
root.set(DataMetadataMetricConstants.DATA_METADATA_METRIC_NAME, getMetricValueNode(metrics));
return root;
}
/**
* Checks if the given iceberg table metadata location exists.
*
* @param tableName The table name.
* @param metadataLocation The metadata location.
* @return True if the location exists.
*/
public boolean doesMetadataLocationExist(final QualifiedName tableName,
final String metadataLocation) {
boolean result = false;
if (!StringUtils.isBlank(metadataLocation)) {
try {
final Path metadataPath = new Path(metadataLocation);
result = getFs(metadataPath, conf).exists(metadataPath);
} catch (Exception e) {
log.warn(String.format("Failed getting the filesystem for metadata location: %s tableName: %s",
metadataLocation, tableName), e);
registry.counter(HiveMetrics.CounterFileSystemReadFailure.name()).increment();
}
}
return result;
}
private static FileSystem getFs(final Path path,
final Configuration conf) {
try {
return path.getFileSystem(conf);
} catch (IOException ex) {
throw new RuntimeException(String.format("Failed to get file system for path: %s", path), ex);
}
}
private ObjectNode getMetricValueNode(final ScanSummary.PartitionMetrics metrics) {
final ObjectNode node = JsonNodeFactory.instance.objectNode();
ObjectNode valueNode = JsonNodeFactory.instance.objectNode();
valueNode.put(DataMetadataMetricConstants.DATA_METADATA_VALUE, metrics.recordCount());
node.set(DataMetadataMetrics.rowCount.getMetricName(), valueNode);
valueNode = JsonNodeFactory.instance.objectNode();
valueNode.put(DataMetadataMetricConstants.DATA_METADATA_VALUE, metrics.fileCount());
node.set(DataMetadataMetrics.fileCount.getMetricName(), valueNode);
return node;
}
/**
* record the duration to timer.
*
* @param requestTag tag name.
* @param duration duration of the operation.
*/
private void recordTimer(final String requestTag, final long duration) {
final HashMap<String, String> tags = new HashMap<>();
tags.put("request", requestTag);
this.registry.timer(registry.createId(IcebergRequestMetrics.TimerIcebergRequest.getMetricName())
.withTags(tags))
.record(duration, TimeUnit.MILLISECONDS);
log.debug("## Time taken to complete {} is {} ms", requestTag, duration);
}
/**
* increase the counter of operation.
*
* @param metricName metric name
* @param tableName table name of the operation
*/
private void increaseCounter(final String metricName, final QualifiedName tableName) {
this.registry.counter(registry.createId(metricName).withTags(tableName.parts())).increment();
}
private Date fromEpochMilliToDate(@Nullable final Long l) {
return (l == null) ? null : Date.from(Instant.ofEpochMilli(l));
}
// iceberg://<db-name.table-name>/<partition>/snapshot_time=<dateCreated>
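// Illustrative example (not in the original source), assuming the configured scheme is "iceberg"
// and hypothetical names: iceberg://mydb.mytable/dateint=20180503/snapshot_time=1525305600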
private String getIcebergPartitionURI(final String databaseName,
final String tableName,
final String partitionName,
@Nullable final Long dataTimestampMillis,
final ConnectorContext context) {
return String.format("%s://%s.%s/%s/snapshot_time=%s",
context.getConfig().getIcebergPartitionUriScheme(),
databaseName,
tableName,
partitionName,
(dataTimestampMillis == null) ? partitionName.hashCode()
: Instant.ofEpochMilli(dataTimestampMillis).getEpochSecond());
}
}
| 1,575 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableWrapper.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.iceberg;
import org.apache.iceberg.Table;
import lombok.Data;
import java.util.Map;
/**
* This class represents the iceberg table.
*/
@Data
public class IcebergTableWrapper {
private final Table table;
private final Map<String, String> extraProperties;
}
| 1,576 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableCriteriaImpl.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
/**
* Default Iceberg table criteria implementation.
*
* @author zhenl
* @since 1.2.0
*/
public class IcebergTableCriteriaImpl implements IcebergTableCriteria {
private final ConnectorContext connectorContext;
/**
* Iceberg table criteriaImpl constructor.
* @param connectorContext connector context
*/
public IcebergTableCriteriaImpl(final ConnectorContext connectorContext) {
this.connectorContext = connectorContext;
}
}
| 1,577 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergTableOpWrapper.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.metacat.connector.hive.iceberg;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.apache.iceberg.ScanSummary;
import org.apache.iceberg.Table;
import org.apache.iceberg.expressions.Expression;
import javax.annotation.Nullable;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
/**
* Iceberg table operation wrapper.
*
* @author zhenl
* @since 1.2.0
*/
@Slf4j
public class IcebergTableOpWrapper {
private final Config config;
private final Map<String, String> configuration;
private final ThreadServiceManager threadServiceManager;
/**
* Constructor.
* @param connectorContext server context
* @param threadServiceManager executor service
*/
public IcebergTableOpWrapper(final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager) {
this.config = connectorContext.getConfig();
this.configuration = connectorContext.getConfiguration();
this.threadServiceManager = threadServiceManager;
}
/**
* get iceberg partition map.
*
* @param icebergTable iceberg table
* @param filter iceberg filter expression
* @return scan summary map
*/
public Map<String, ScanSummary.PartitionMetrics> getPartitionMetricsMap(final Table icebergTable,
@Nullable final Expression filter) {
Map<String, ScanSummary.PartitionMetrics> result = Maps.newHashMap();
//
// Cancel the iceberg call if it times out.
//
final Future<Map<String, ScanSummary.PartitionMetrics>> future = threadServiceManager.getExecutor()
.submit(() -> (filter != null) ? ScanSummary.of(icebergTable.newScan().filter(filter))
.limit(config.getMaxPartitionsThreshold())
.throwIfLimited()
.build()
:
ScanSummary.of(icebergTable.newScan()) //the top x records
.limit(config.getIcebergTableSummaryFetchSize())
.build());
try {
final int getIcebergPartitionsTimeout = Integer.parseInt(configuration
.getOrDefault(HiveConfigConstants.GET_ICEBERG_PARTITIONS_TIMEOUT, "120"));
result = future.get(getIcebergPartitionsTimeout, TimeUnit.SECONDS);
} catch (Exception e) {
if (!future.isDone()) {
try {
future.cancel(true);
} catch (Exception ignored) {
log.warn("Failed cancelling the task that gets the partitions for an iceberg table.");
}
}
if (e instanceof ExecutionException && e.getCause() != null) {
//
// On execution exception, throw the inner exception. This is added to throw these as 4xx errors
// instead of 5xx.
//
if (e.getCause() instanceof IllegalArgumentException) {
throw (IllegalArgumentException) e.getCause();
}
Throwables.propagate(e.getCause());
}
Throwables.propagate(e);
}
return result;
}
}
| 1,578 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/package-info.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Iceberg table interaction.
*
* @author zhenl
* @since 1.2.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.iceberg;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,579 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/iceberg/IcebergRequestMetrics.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.iceberg;
import lombok.Getter;
/**
* Iceberg Metrics.
*
* @author zhenl
* @since 1.2.0
*/
@Getter
public enum IcebergRequestMetrics {
/**
* Timer.
*/
TimerIcebergRequest(IcebergRequestMetrics.Type.timer, "requests"),
/**
* Tag of loadTable operation.
*/
TagLoadTable("loadTable"),
/**
* Tag of getPartitionMap operation.
*/
TagGetPartitionMap("getPartitionMap"),
/**
* Counter.
*/
CounterGetPartitionsExceedThresholdFailure(IcebergRequestMetrics.Type.counter,
"getPartitionsExceedThresholdFailure");
enum Type {
counter,
gauge,
timer
}
private final String metricName;
IcebergRequestMetrics(final IcebergRequestMetrics.Type type, final String measure) {
this.metricName = String.format("metacat.iceberg.%s.%s", type.name(), measure);
}
IcebergRequestMetrics(final String name) {
this.metricName = name;
}
@Override
public String toString() {
return metricName;
}
}
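/*
 * Hedged usage sketch (not part of the original file): recording a timed
 * loadTable request against these metric names with a Spectator Registry,
 * mirroring how the connector tags its timers elsewhere. The variable names
 * are assumptions for illustration.
 */
final class IcebergRequestMetricsUsageExample {
    private IcebergRequestMetricsUsageExample() { }
    static void recordLoadTable(final com.netflix.spectator.api.Registry registry, final long durationMs) {
        registry.timer(
            registry.createId(IcebergRequestMetrics.TimerIcebergRequest.getMetricName())
                .withTag("request", IcebergRequestMetrics.TagLoadTable.getMetricName()))
            .record(durationMs, java.util.concurrent.TimeUnit.MILLISECONDS);
    }
}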
| 1,580 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/HiveConnectorConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;
import com.netflix.metacat.common.server.connectors.util.TimeUtil;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.client.thrift.HiveMetastoreClientFactory;
import com.netflix.metacat.connector.hive.client.thrift.MetacatHiveClient;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.net.URI;
import java.util.concurrent.TimeUnit;
/**
* Hive configs.
*
* @author zhenl
* @since 1.1.0
*/
@Slf4j
@Configuration
public class HiveConnectorConfig {
/**
* create hive connector database service.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverter metacat converter
* @return HiveConnectorDatabaseService
*/
@Bean
@ConditionalOnMissingBean(HiveConnectorDatabaseService.class)
public HiveConnectorDatabaseService hiveDatabaseService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverter
) {
return new HiveConnectorDatabaseService(
metacatHiveClient,
hiveMetacatConverter
);
}
/**
* create hive connector table service.
*
* @param metacatHiveClient metacat hive client
* @param hiveMetacatConverters hive metacat converters
* @param hiveConnectorDatabaseService hive database service
* @param connectorContext connector config
* @return HiveConnectorTableService
*/
@Bean
@ConditionalOnMissingBean(HiveConnectorTableService.class)
public HiveConnectorTableService hiveTableService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final ConnectorContext connectorContext
) {
return new HiveConnectorTableService(
connectorContext.getCatalogName(),
metacatHiveClient,
hiveConnectorDatabaseService,
hiveMetacatConverters,
connectorContext
);
}
/**
* create hive connector partition service.
*
* @param metacatHiveClient hive client
* @param hiveMetacatConverter metacat converter
* @param connectorContext connector config
* @return HiveConnectorPartitionService
*/
@Bean
@ConditionalOnMissingBean(HiveConnectorPartitionService.class)
public HiveConnectorPartitionService partitionService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverter,
final ConnectorContext connectorContext
) {
return new HiveConnectorPartitionService(
connectorContext,
metacatHiveClient,
hiveMetacatConverter
);
}
/**
* create thrift hive client.
*
* @param connectorContext connector config.
* @return thrift hive client
* @throws MetaException meta exception
*/
@Bean
@ConditionalOnMissingBean(IMetacatHiveClient.class)
public IMetacatHiveClient createThriftClient(final ConnectorContext connectorContext) throws MetaException {
final HiveMetastoreClientFactory factory = new HiveMetastoreClientFactory(
null,
(int) TimeUtil.toTime(
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.HIVE_METASTORE_TIMEOUT, "20s"),
TimeUnit.SECONDS,
TimeUnit.MILLISECONDS
)
);
final String metastoreUri = connectorContext.getConfiguration().get(HiveConfigConstants.THRIFT_URI);
try {
return new MetacatHiveClient(new URI(metastoreUri), factory);
} catch (Exception e) {
final String message = String.format("Invalid thrift uri %s", metastoreUri);
log.info(message);
throw new IllegalArgumentException(message, e);
}
}
/**
* thread Service Manager.
* @param connectorContext connector config
* @return threadServiceManager
*/
@Bean
public ThreadServiceManager threadServiceManager(final ConnectorContext connectorContext) {
return new ThreadServiceManager(connectorContext.getRegistry(),
connectorContext.getConfig().getServiceMaxNumberOfThreads(),
1000,
"hive");
}
}
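/*
 * Hedged sketch (illustrative, not in the original file): because the beans
 * above are declared @ConditionalOnMissingBean, a deployment can supply its
 * own client bean and the default thrift client creation is skipped. The URI
 * and timeout below are assumptions.
 */
@Configuration
class CustomHiveClientConfigExample {
    @Bean
    public IMetacatHiveClient metacatHiveClient() throws Exception {
        return new MetacatHiveClient(
            new URI("thrift://localhost:9083"),
            new HiveMetastoreClientFactory(null, 20000));
    }
}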
| 1,581 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/CacheConfig.java
|
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Spring configuration for cache.
*
* @author amajumdar
* @since 1.3.0
*/
@Configuration
@ConditionalOnProperty(value = "metacat.cache.enabled", havingValue = "true")
@EnableCaching
public class CacheConfig {
/**
* Returns the cache manager from the parent application context.
* @param connectorContext connector context
* @return CacheManager
*/
@Bean
public CacheManager cacheManager(final ConnectorContext connectorContext) {
return connectorContext.getApplicationContext().getBean(CacheManager.class);
}
}
| 1,582 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/HiveConnectorFastServiceConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.HiveConnectorDatabaseService;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.commonview.CommonViewHandler;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteria;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableCriteriaImpl;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpWrapper;
import com.netflix.metacat.connector.hive.iceberg.IcebergTableOpsProxy;
import com.netflix.metacat.connector.hive.sql.DirectSqlDatabase;
import com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition;
import com.netflix.metacat.connector.hive.sql.DirectSqlSavePartition;
import com.netflix.metacat.connector.hive.sql.DirectSqlTable;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastDatabaseService;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastPartitionService;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastTableService;
import com.netflix.metacat.connector.hive.sql.HiveConnectorFastTableServiceProxy;
import com.netflix.metacat.connector.hive.sql.SequenceGeneration;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.transaction.annotation.EnableTransactionManagement;
/**
* HiveConnectorFastServiceConfig.
*
* @author zhenl
* @since 1.1.0
*/
@Configuration
@EnableTransactionManagement(proxyTargetClass = true)
@ConditionalOnProperty(value = "useHiveFastService", havingValue = "true")
public class HiveConnectorFastServiceConfig {
/**
* create hive connector fast service metric.
*
* @param connectorContext connector config
* @return HiveConnectorFastServiceMetric
*/
@Bean
public HiveConnectorFastServiceMetric hiveConnectorFastServiceMetric(
final ConnectorContext connectorContext
) {
return new HiveConnectorFastServiceMetric(
connectorContext.getRegistry()
);
}
/**
* create hive connector fast partition service.
*
* @param metacatHiveClient hive client
* @param warehouse hive warehouse
* @param hiveMetacatConverter metacat converter
* @param connectorContext connector config
* @param directSqlGetPartition service to get partitions
* @param directSqlSavePartition service to save partitions
* @param icebergTableHandler iceberg table handler
* @return HiveConnectorPartitionService
*/
@Bean
public HiveConnectorPartitionService partitionService(
final IMetacatHiveClient metacatHiveClient,
final Warehouse warehouse,
final HiveConnectorInfoConverter hiveMetacatConverter,
final ConnectorContext connectorContext,
final DirectSqlGetPartition directSqlGetPartition,
final DirectSqlSavePartition directSqlSavePartition,
final IcebergTableHandler icebergTableHandler
) {
return new HiveConnectorFastPartitionService(
connectorContext,
metacatHiveClient,
warehouse,
hiveMetacatConverter,
directSqlGetPartition,
directSqlSavePartition,
icebergTableHandler
);
}
/**
* Service to get partitions.
*
* @param threadServiceManager thread service manager
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param serviceMetric fast service metric
* @return DirectSqlGetPartition
*/
@Bean
public DirectSqlGetPartition directSqlGetPartition(
final ThreadServiceManager threadServiceManager,
final ConnectorContext connectorContext,
@Qualifier("hiveReadJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final HiveConnectorFastServiceMetric serviceMetric
) {
return new DirectSqlGetPartition(
connectorContext,
threadServiceManager,
hiveJdbcTemplate,
serviceMetric
);
}
/**
* Service to save partitions.
*
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param sequenceGeneration sequence generator
* @param serviceMetric fast service metric
* @return DirectSqlSavePartition
*/
@Bean
public DirectSqlSavePartition directSqlSavePartition(
final ConnectorContext connectorContext,
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final SequenceGeneration sequenceGeneration,
final HiveConnectorFastServiceMetric serviceMetric
) {
return new DirectSqlSavePartition(
connectorContext,
hiveJdbcTemplate,
sequenceGeneration,
serviceMetric
);
}
/**
* Service to generate sequence ids.
*
* @param hiveJdbcTemplate hive JDBC template
* @return SequenceGeneration
*/
@Bean
public SequenceGeneration sequenceGeneration(
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate
) {
return new SequenceGeneration(hiveJdbcTemplate);
}
/**
* Data access service for table.
*
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param serviceMetric fast service metric
* @param directSqlSavePartition partition service involving direct sqls
* @param warehouse warehouse
* @return DirectSqlTable
*/
@Bean
public DirectSqlTable directSqlTable(
final ConnectorContext connectorContext,
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final HiveConnectorFastServiceMetric serviceMetric,
final DirectSqlSavePartition directSqlSavePartition,
final Warehouse warehouse
) {
return new DirectSqlTable(
connectorContext,
hiveJdbcTemplate,
serviceMetric,
directSqlSavePartition,
warehouse
);
}
/**
* Data access service for database.
*
* @param connectorContext connector config
* @param hiveJdbcTemplate hive JDBC template
* @param serviceMetric fast service metric
* @return DirectSqlDatabase
*/
@Bean
public DirectSqlDatabase directSqlDatabase(
final ConnectorContext connectorContext,
@Qualifier("hiveWriteJdbcTemplate") final JdbcTemplate hiveJdbcTemplate,
final HiveConnectorFastServiceMetric serviceMetric
) {
return new DirectSqlDatabase(
connectorContext,
hiveJdbcTemplate,
serviceMetric
);
}
/**
* create hive connector fast table service.
*
* @param metacatHiveClient metacat hive client
* @param hiveMetacatConverters hive metacat converters
* @param hiveConnectorDatabaseService hive database service
* @param connectorContext server context
* @param directSqlTable table jpa service
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
* @param hiveConnectorFastTableServiceProxy hive connector fast table service proxy
* @return HiveConnectorFastTableService
*/
@Bean
public HiveConnectorTableService hiveTableService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final HiveConnectorDatabaseService hiveConnectorDatabaseService,
final ConnectorContext connectorContext,
final DirectSqlTable directSqlTable,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler,
final HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy
) {
return new HiveConnectorFastTableService(
connectorContext.getCatalogName(),
metacatHiveClient,
hiveConnectorDatabaseService,
hiveMetacatConverters,
connectorContext,
directSqlTable,
icebergTableHandler,
commonViewHandler,
hiveConnectorFastTableServiceProxy
);
}
/**
* create hive connector fast table service proxy.
*
* @param hiveMetacatConverters hive metacat converters
* @param icebergTableHandler iceberg table handler
* @param commonViewHandler common view handler
* @return HiveConnectorFastTableServiceProxy
*/
@Bean
public HiveConnectorFastTableServiceProxy hiveConnectorFastTableServiceProxy(
final HiveConnectorInfoConverter hiveMetacatConverters,
final IcebergTableHandler icebergTableHandler,
final CommonViewHandler commonViewHandler
) {
return new HiveConnectorFastTableServiceProxy(
hiveMetacatConverters,
icebergTableHandler,
commonViewHandler
);
}
/**
* create hive connector fast database service.
*
* @param metacatHiveClient metacat hive client
* @param hiveMetacatConverters hive metacat converters
* @param directSqlDatabase database sql service
* @return HiveConnectorDatabaseService
*/
@Bean
public HiveConnectorDatabaseService hiveDatabaseService(
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final DirectSqlDatabase directSqlDatabase
) {
return new HiveConnectorFastDatabaseService(
metacatHiveClient,
hiveMetacatConverters,
directSqlDatabase
);
}
/**
* Create iceberg table handler.
* @param connectorContext server context
* @param icebergTableCriteria iceberg table criteria
* @param icebergTableOpWrapper iceberg table operation
* @param icebergTableOpsProxy IcebergTableOps proxy
* @return IcebergTableHandler
*/
@Bean
public IcebergTableHandler icebergTableHandler(final ConnectorContext connectorContext,
final IcebergTableCriteria icebergTableCriteria,
final IcebergTableOpWrapper icebergTableOpWrapper,
final IcebergTableOpsProxy icebergTableOpsProxy) {
return new IcebergTableHandler(connectorContext,
icebergTableCriteria,
icebergTableOpWrapper,
icebergTableOpsProxy);
}
/**
* Create iceberg table criteria.
* @param connectorContext server context
* @return IcebergTableCriteria
*/
@Bean
public IcebergTableCriteria icebergTableCriteria(final ConnectorContext connectorContext) {
return new IcebergTableCriteriaImpl(connectorContext);
}
/**
* Create iceberg table operation wrapper.
* @param connectorContext server context
* @param threadServiceManager executor service
* @return IcebergTableOpWrapper
*/
@Bean
public IcebergTableOpWrapper icebergTableOpWrapper(final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager) {
return new IcebergTableOpWrapper(connectorContext, threadServiceManager);
}
/**
* Create commonViewHandler.
*
* @param connectorContext server context
* @return CommonViewHandler
*/
@Bean
public CommonViewHandler commonViewHandler(final ConnectorContext connectorContext) {
return new CommonViewHandler(connectorContext);
}
/**
* Create IcebergTableOps proxy.
* @return IcebergTableOpsProxy
*/
@Bean
public IcebergTableOpsProxy icebergTableOps() {
return new IcebergTableOpsProxy();
}
}
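/*
 * Hedged note (added for illustration): this whole configuration only loads
 * when the catalog sets useHiveFastService=true, per the @ConditionalOnProperty
 * annotation above; otherwise the plain HiveConnectorConfig beans are used.
 * For example, in a catalog properties file:
 *
 *   useHiveFastService=true
 */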
| 1,583 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive connector config.
*
* @author zhenl
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.configs;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,584 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/configs/HiveConnectorClientConfig.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.configs;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.client.embedded.EmbeddedHiveClient;
import com.netflix.metacat.connector.hive.metastore.HMSHandlerProxy;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import javax.sql.DataSource;
/**
* Hive Connector Client Config.
*
* @author zhenl
* @since 1.1.0
*/
@Configuration
@ConditionalOnProperty(value = "useEmbeddedClient", havingValue = "true")
public class HiveConnectorClientConfig {
/** Default Query timeout in milliseconds. */
private static final int DEFAULT_DATASTORE_TIMEOUT = 60000;
/** Default Query timeout in milliseconds for reads. */
private static final int DEFAULT_DATASTORE_READ_TIMEOUT = 120000;
/** Default Query timeout in milliseconds for writes. */
private static final int DEFAULT_DATASTORE_WRITE_TIMEOUT = 120000;
/**
* create local hive client.
*
* @param connectorContext connector config context
* @return IMetacatHiveClient
* @throws Exception exception
*/
@Bean
public IMetacatHiveClient createLocalClient(final ConnectorContext connectorContext) throws Exception {
try {
final HiveConf conf = this.getDefaultConf(connectorContext);
connectorContext.getConfiguration().forEach(conf::set);
DataSourceManager.get().load(
connectorContext.getCatalogShardName(),
connectorContext.getConfiguration()
);
return new EmbeddedHiveClient(
connectorContext.getCatalogName(),
HMSHandlerProxy.getProxy(conf, connectorContext.getRegistry()),
connectorContext.getRegistry()
);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format(
"Failed creating the hive metastore client for catalog: %s",
connectorContext.getCatalogName()
),
e
);
}
}
/**
* create warehouse for file system calls.
*
* @param connectorContext connector config context
* @return Warehouse
*/
@Bean
public Warehouse warehouse(final ConnectorContext connectorContext) {
try {
final HiveConf conf = this.getDefaultConf(connectorContext);
connectorContext.getConfiguration().forEach(conf::set);
return new Warehouse(conf);
} catch (Exception e) {
throw new IllegalArgumentException(
String.format(
"Failed creating the hive warehouse for catalog: %s",
connectorContext.getCatalogName()
),
e
);
}
}
/**
* hive DataSource.
*
* @param connectorContext connector config.
* @return data source
*/
@Bean
public DataSource hiveDataSource(final ConnectorContext connectorContext) {
final HiveConf conf = this.getDefaultConf(connectorContext);
connectorContext.getConfiguration().forEach(conf::set);
DataSourceManager.get().load(
connectorContext.getCatalogShardName(),
connectorContext.getConfiguration()
);
return DataSourceManager.get().get(connectorContext.getCatalogShardName());
}
/**
* hive metadata Transaction Manager.
*
* @param hiveDataSource hive data source
* @return hive transaction manager
*/
@Bean
public DataSourceTransactionManager hiveTxManager(
@Qualifier("hiveDataSource") final DataSource hiveDataSource) {
return new DataSourceTransactionManager(hiveDataSource);
}
/**
* hive metadata read JDBC template. Query timeout is set to control long running read queries.
*
* @param connectorContext connector config.
* @param hiveDataSource hive data source
* @return hive JDBC Template
*/
@Bean
public JdbcTemplate hiveReadJdbcTemplate(
final ConnectorContext connectorContext,
@Qualifier("hiveDataSource") final DataSource hiveDataSource) {
final JdbcTemplate result = new JdbcTemplate(hiveDataSource);
result.setQueryTimeout(getDataStoreReadTimeout(connectorContext) / 1000);
return result;
}
/**
* hive metadata write JDBC template. Query timeout is set to control long running write queries.
*
* @param connectorContext connector config.
* @param hiveDataSource hive data source
* @return hive JDBC Template
*/
@Bean
public JdbcTemplate hiveWriteJdbcTemplate(
final ConnectorContext connectorContext,
@Qualifier("hiveDataSource") final DataSource hiveDataSource) {
final JdbcTemplate result = new JdbcTemplate(hiveDataSource);
result.setQueryTimeout(getDataStoreWriteTimeout(connectorContext) / 1000);
return result;
}
@VisibleForTesting
private HiveConf getDefaultConf(
final ConnectorContext connectorContext
) {
final HiveConf result = new HiveConf();
result.setBoolean(HiveConfigConstants.USE_METASTORE_LOCAL, true);
final int dataStoreTimeout = getDataStoreTimeout(connectorContext);
result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT, dataStoreTimeout);
result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT, dataStoreTimeout);
result.setInt(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT, getDataStoreWriteTimeout(connectorContext));
result.setInt(HiveConfigConstants.HIVE_METASTORE_DS_RETRY, 0);
result.setInt(HiveConfigConstants.HIVE_HMSHANDLER_RETRY, 0);
result.set(
HiveConfigConstants.JAVAX_JDO_PERSISTENCEMANAGER_FACTORY_CLASS,
HiveConfigConstants.JAVAX_JDO_PERSISTENCEMANAGER_FACTORY
);
result.setBoolean(HiveConfigConstants.HIVE_STATS_AUTOGATHER, false);
return result;
}
private int getDataStoreTimeout(final ConnectorContext connectorContext) {
int result = DEFAULT_DATASTORE_TIMEOUT;
try {
result = Integer.parseInt(
connectorContext.getConfiguration().get(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT));
} catch (final Exception ignored) { }
return result;
}
private int getDataStoreReadTimeout(final ConnectorContext connectorContext) {
int result = DEFAULT_DATASTORE_READ_TIMEOUT;
try {
result = Integer.parseInt(
connectorContext.getConfiguration().get(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT));
} catch (final Exception ignored) { }
return result;
}
private int getDataStoreWriteTimeout(final ConnectorContext connectorContext) {
int result = DEFAULT_DATASTORE_WRITE_TIMEOUT;
try {
result = Integer.parseInt(
connectorContext.getConfiguration().get(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT));
} catch (final Exception ignored) { }
return result;
}
}
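/*
 * Hedged sketch (not in the original file) of the timeout fallback used above:
 * configuration values are read as milliseconds and fall back to the defaults
 * when missing or unparseable; JdbcTemplate#setQueryTimeout then receives
 * seconds, hence the division by 1000 in the template beans.
 */
final class DataStoreTimeoutExample {
    private DataStoreTimeoutExample() { }
    static int timeoutMillis(final String configured, final int defaultMillis) {
        try {
            return Integer.parseInt(configured);
        } catch (Exception ignored) {
            return defaultMillis; // e.g. 120000 ms -> setQueryTimeout(120)
        }
    }
}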
| 1,585 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/monitoring/HiveMetrics.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//CHECKSTYLE:OFF
package com.netflix.metacat.connector.hive.monitoring;
import lombok.Getter;
/**
* Hive Metrics.
*
* @author zhenl
* @since 1.0.0
*/
@Getter
public enum HiveMetrics {
/**
* hive sql lock error.
*/
CounterHiveSqlLockError(Type.counter, "hiveSqlLockError"),
CounterHiveGetTablePartitionsTimeoutFailure(Type.counter, "getPartitionsTimeoutFailure"),
CounterHiveExperimentGetTablePartitionsFailure(Type.counter, "experimentGetPartitionsFailure"),
CounterHivePartitionPathIsNotDir(Type.counter, "partitionPathIsNotDir"),
CounterHivePartitionFileSystemCall(Type.counter, "partitionFileSystemCall"),
CounterHiveGetPartitionsExceedThresholdFailure(Type.counter, "getPartitionsExceedThresholdFailure"),
CounterHiveFileSystemFailure(Type.counter, "fileSystemFailure"),
CounterFileSystemReadFailure(Type.counter, "fileSystemReadFailure"),
/**
* Gauge.
*/
GaugeAddPartitions(Type.gauge, "partitionAdd"),
GaugeDeletePartitions(Type.gauge, "partitionDelete"),
GaugeGetPartitionsCount(Type.gauge, "partitionGet"),
GaugePreExpressionFilterGetPartitionsCount(Type.gauge, "preExpressionFilterGetPartitionsCount"),
/**
* Timer.
*/
TimerHiveRequest(Type.timer, "embeddedclient.requests"),
TimerFastHiveRequest(Type.timer, "fast.requests"),
/**
* hive function names.
*/
TagCreateDatabase("createDatabase"),
TagCreateTable("createTable"),
TagDropDatabase("dropDatabase"),
TagDropHivePartitions("dropHivePartitions"),
TagAlterDatabase("alterDatabase"),
TagGetAllDatabases("getAllDatabases"),
TagGetDatabase("getDatabase"),
TagGetAllTables("getAllTables"),
TagGetTableNamesByFilter("getTableNamesByFilter"),
TagGetTableByName("getTableByName"),
TagLoadTable("loadTable"),
TagAlterTable("alterTable"),
TagAddPartitions("addPartitions"),
TagAlterPartitions("alterPartitions"),
TagCreatePartitionLocations("createPartitionLocations"),
TagAddDropPartitions("addDropPartitions"),
TagDropTable("dropTable"),
TagRename("rename"),
TagListPartitionsByFilter("listPartitionsByFilter"),
TagGetPartitions("getPartitions"),
TagGetPartitionCount("getPartitionCount"),
TagGetPartitionKeys("getPartitionKeys"),
TagGetPartitionNames("getPartitionNames"),
TagGetTableNames("getTableNames"),
TagTableExists("tableExists");
enum Type {
counter,
gauge,
timer
}
private final String metricName;
HiveMetrics(final Type type, final String measure) {
this.metricName = String.format("metacat.hive.%s.%s", type.name(), measure);
}
HiveMetrics(final String name) {
this.metricName = name;
}
@Override
public String toString() {
return metricName;
}
}
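/*
 * Hedged usage sketch (illustrative only): incrementing a per-catalog counter,
 * mirroring how EmbeddedHiveClient builds its sql-lock-error counter name from
 * the metric name plus the catalog name.
 */
final class HiveMetricsUsageExample {
    private HiveMetricsUsageExample() { }
    static void countSqlLockError(final com.netflix.spectator.api.Registry registry, final String catalogName) {
        registry.counter(HiveMetrics.CounterHiveSqlLockError.getMetricName() + "." + catalogName).increment();
    }
}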
| 1,586 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/monitoring/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Hive monitor.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.monitoring;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,587 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive client implementation.
* @author zhenl
* @since 1.1.0
*/
package com.netflix.metacat.connector.hive.client;
| 1,588 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/HiveMetastoreClientFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.thrift;
import com.google.common.net.HostAndPort;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import javax.annotation.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.Socket;
import java.net.SocketAddress;
/**
* HiveMetastoreClientFactory.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveMetastoreClientFactory {
private final HostAndPort socksProxy;
private final int timeoutMillis;
/**
* Constructor.
*
* @param socksProxy address
* @param timeoutMillis timeoutMillis
*/
public HiveMetastoreClientFactory(@Nullable final HostAndPort socksProxy,
final int timeoutMillis) {
this.socksProxy = socksProxy;
this.timeoutMillis = timeoutMillis;
}
private static Socket createSocksSocket(final HostAndPort proxy) {
final SocketAddress address = InetSocketAddress.createUnresolved(proxy.getHostText(), proxy.getPort());
return new Socket(new Proxy(Proxy.Type.SOCKS, address));
}
private static TTransportException rewriteException(final TTransportException e, final String host) {
return new TTransportException(e.getType(), String.format("%s: %s", host, e.getMessage()), e.getCause());
}
/**
* create.
*
* @param host hostname
* @param port port number
* @return HiveMetastoreClient
* @throws TTransportException TTransportException
*/
public HiveMetastoreClient create(final String host, final int port)
throws TTransportException {
return new HiveMetastoreClient(createTransport(host, port));
}
protected TTransport createRawTransport(final String host, final int port)
throws TTransportException {
if (socksProxy == null) {
final TTransport transport = new TSocket(host, port, timeoutMillis);
try {
transport.open();
return transport;
} catch (Throwable t) {
transport.close();
throw t;
}
}
final Socket socks = createSocksSocket(socksProxy);
try {
try {
socks.connect(InetSocketAddress.createUnresolved(host, port), timeoutMillis);
socks.setSoTimeout(timeoutMillis);
return new TSocket(socks);
} catch (Throwable t) {
closeQuietly(socks);
throw t;
}
} catch (IOException e) {
throw new TTransportException(e);
}
}
protected TTransport createTransport(final String host, final int port)
throws TTransportException {
try {
return new TTransportWrapper(createRawTransport(host, port), host);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
private static void closeQuietly(final Closeable closeable) {
try {
closeable.close();
} catch (IOException e) {
// ignored
}
}
private static class TTransportWrapper
extends TTransport {
private final TTransport transport;
private final String host;
TTransportWrapper(final TTransport transport, final String host) {
this.transport = transport;
this.host = host;
}
@Override
public boolean isOpen() {
return transport.isOpen();
}
@Override
public boolean peek() {
return transport.peek();
}
@Override
public byte[] getBuffer() {
return transport.getBuffer();
}
@Override
public int getBufferPosition() {
return transport.getBufferPosition();
}
@Override
public int getBytesRemainingInBuffer() {
return transport.getBytesRemainingInBuffer();
}
@Override
public void consumeBuffer(final int len) {
transport.consumeBuffer(len);
}
@Override
public void close() {
transport.close();
}
@Override
public void open()
throws TTransportException {
try {
transport.open();
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public int readAll(final byte[] bytes, final int off, final int len)
throws TTransportException {
try {
return transport.readAll(bytes, off, len);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public int read(final byte[] bytes, final int off, final int len)
throws TTransportException {
try {
return transport.read(bytes, off, len);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public void write(final byte[] bytes)
throws TTransportException {
try {
transport.write(bytes);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public void write(final byte[] bytes, final int off, final int len)
throws TTransportException {
try {
transport.write(bytes, off, len);
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
@Override
public void flush()
throws TTransportException {
try {
transport.flush();
} catch (TTransportException e) {
throw rewriteException(e, host);
}
}
}
}
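/*
 * Hedged usage sketch (not in the original file): opening a metastore client
 * directly through the factory; the host and port here are assumptions.
 */
final class HiveMetastoreClientFactoryUsageExample {
    private HiveMetastoreClientFactoryUsageExample() { }
    static HiveMetastoreClient open() throws TTransportException {
        final HiveMetastoreClientFactory factory = new HiveMetastoreClientFactory(null, 3000);
        return factory.create("metastore.example.com", 9083);
    }
}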
| 1,589 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/MetacatHiveClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.thrift;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.apache.thrift.transport.TTransportException;
import javax.annotation.Nullable;
import java.net.URI;
import java.util.List;
import java.util.Set;
/**
* MetacatHiveClient.
*
* @author zhenl
* @since 1.0.0
*/
public class MetacatHiveClient implements IMetacatHiveClient {
private static final short ALL_RESULTS = -1;
private final HiveMetastoreClientFactory hiveMetastoreClientFactory;
private final String host;
private final int port;
/**
* Constructor.
*
* @param address address
* @param hiveMetastoreClientFactory hiveMetastoreClientFactory
* @throws MetaException exception
*/
public MetacatHiveClient(final URI address,
final HiveMetastoreClientFactory hiveMetastoreClientFactory)
throws MetaException {
this.hiveMetastoreClientFactory = hiveMetastoreClientFactory;
Preconditions.checkArgument(address.getHost() != null, "metastoreUri host is missing: " + address);
Preconditions.checkArgument(address.getPort() != -1, "metastoreUri port is missing: " + address);
this.host = address.getHost();
this.port = address.getPort();
}
/**
* Create a metastore client instance.
*
* @return hive metastore client
*/
private HiveMetastoreClient createMetastoreClient() {
try {
return hiveMetastoreClientFactory.create(host, port);
} catch (TTransportException e) {
throw new RuntimeException("Failed connecting to Hive metastore: " + host + ":" + port, e);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllDatabases() throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_all_databases();
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllTables(final String databaseName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_all_tables(databaseName);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getTableNames(final String databaseName, final String filter, final int limit)
throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_table_names_by_filter(databaseName, filter, (short) limit);
}
}
/**
* {@inheritDoc}.
*/
@Override
public Table getTableByName(final String databaseName,
final String tableName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_table(databaseName, tableName);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void createTable(final Table table) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.create_table(table);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void dropTable(final String databaseName,
final String tableName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.drop_table(databaseName, tableName, false);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void rename(final String databaseName,
final String oldName,
final String newDatabaseName,
final String newName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
final Table table = client.get_table(databaseName, oldName);
client.drop_table(databaseName, oldName, false);
table.setDbName(newDatabaseName);
table.setTableName(newName);
client.create_table(table);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void alterTable(final String databaseName,
final String tableName,
final Table table) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.alter_table(databaseName, tableName, table);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void alterDatabase(final String databaseName,
final Database database) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.alter_database(databaseName, database);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void createDatabase(final Database database) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.create_database(database);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void dropDatabase(final String dbName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.drop_database(dbName, false, false);
}
}
/**
* {@inheritDoc}.
*/
@Override
public Database getDatabase(final String databaseName) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_database(databaseName);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<Partition> getPartitions(final String databaseName,
final String tableName,
@Nullable final List<String> partitionNames) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
if (partitionNames != null && !partitionNames.isEmpty()) {
return client.get_partitions_by_names(databaseName, tableName, partitionNames);
} else {
return client.get_partitions(databaseName, tableName, ALL_RESULTS);
}
}
}
/**
* {@inheritDoc}.
*/
@Override
public void dropPartitions(final String databaseName,
final String tableName,
final List<String> partitionNames) throws
TException {
dropHivePartitions(createMetastoreClient(), databaseName, tableName, partitionNames);
}
/**
* {@inheritDoc}.
*/
@Override
public List<Partition> listPartitionsByFilter(final String databaseName,
final String tableName,
final String filter
) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_partitions_by_filter(databaseName, tableName, filter, ALL_RESULTS);
}
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(final String databaseName,
final String tableName) throws TException {
return getPartitions(databaseName, tableName, null).size();
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionNames(final String databaseName,
final String tableName)
throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
return client.get_partition_names(databaseName, tableName, ALL_RESULTS);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void savePartitions(final List<Partition> partitions)
throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.add_partitions(partitions);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void alterPartitions(final String dbName, final String tableName,
final List<Partition> partitions) throws
TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
client.alter_partitions(dbName, tableName, partitions);
}
}
/**
* {@inheritDoc}.
*/
@Override
public void addDropPartitions(final String dbName, final String tableName,
final List<Partition> partitions,
final List<String> delPartitionNames) throws TException {
try (HiveMetastoreClient client = createMetastoreClient()) {
try {
dropHivePartitions(client, dbName, tableName, delPartitionNames);
client.add_partitions(partitions);
} catch (MetaException | InvalidObjectException e) {
throw new InvalidMetaException("One or more partitions are invalid.", e);
} catch (TException e) {
throw new TException(
String.format("Internal server error adding/dropping partitions for table %s.%s",
dbName, tableName), e);
}
}
}
private void dropHivePartitions(final HiveMetastoreClient client, final String dbName, final String tableName,
final List<String> partitionNames)
throws TException {
if (partitionNames != null && !partitionNames.isEmpty()) {
final DropPartitionsRequest request = new DropPartitionsRequest(dbName, tableName, new RequestPartsSpec(
RequestPartsSpec._Fields.NAMES, partitionNames));
request.setDeleteData(false);
client.drop_partitions_req(request);
}
}
/**
* getRoles.
* @param user user
* @return set of roles
*/
public Set<String> getRoles(final String user) {
return Sets.newHashSet();
}
}
| 1,590 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/HiveMetastoreClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.thrift;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TTransport;
import java.io.Closeable;
/**
* HiveMetastoreClient.
*
* @author zhenl
* @since 1.0.0
*/
public class HiveMetastoreClient
extends ThriftHiveMetastore.Client
implements Closeable {
private final TTransport transport;
/**
* Constructor.
*
* @param transport transport
*/
public HiveMetastoreClient(final TTransport transport) {
super(new TBinaryProtocol(transport));
this.transport = transport;
}
@Override
public void close() {
transport.close();
}
}
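/*
 * Hedged usage sketch (illustrative): since the client implements Closeable,
 * callers can use try-with-resources so the wrapped TTransport is always
 * closed, as MetacatHiveClient does for every call.
 */
final class HiveMetastoreClientUsageExample {
    private HiveMetastoreClientUsageExample() { }
    static java.util.List<String> listDatabases(final HiveMetastoreClientFactory factory)
        throws org.apache.thrift.TException {
        try (HiveMetastoreClient client = factory.create("metastore.example.com", 9083)) {
            return client.get_all_databases();
        }
    }
}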
| 1,591 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/thrift/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* hive thrift client implementation.
*
* @author zhenl
* @since 1.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.client.thrift;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,592 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/EmbeddedHiveClient.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.embedded;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.partition.util.PartitionUtil;
import com.netflix.metacat.connector.hive.IMetacatHiveClient;
import com.netflix.metacat.connector.hive.metastore.IMetacatHMSHandler;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.datanucleus.exceptions.NucleusDataStoreException;
import javax.annotation.Nullable;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
* Embedded hive metastore client implementation.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class EmbeddedHiveClient implements IMetacatHiveClient {
/**
* EXCEPTION_JDO_PREFIX.
*/
public static final String EXCEPTION_JDO_PREFIX = "javax.jdo.";
/**
* EXCEPTION_SQL_PREFIX.
*/
public static final String EXCEPTION_SQL_PREFIX = "java.sql.SQLException";
/**
* EX_MESSAGE_RESTART_TRANSACTION.
*/
public static final String EX_MESSAGE_RESTART_TRANSACTION = "restarting transaction";
/**
* DEFAULT_PRIVILEGES.
*/
private static final Set<HivePrivilege> DEFAULT_PRIVILEGES =
Sets.newHashSet(HivePrivilege.DELETE, HivePrivilege.INSERT, HivePrivilege.SELECT, HivePrivilege.UPDATE);
/**
* All results.
*/
private static final short ALL_RESULTS = -1;
private final IMetacatHMSHandler handler;
private final Registry registry;
private final Id requestTimerId;
private final Counter hiveSqlErrorCounter;
/**
* Embedded hive client implementation.
*
* @param catalogName catalogName
* @param handler handler
* @param registry registry
*/
public EmbeddedHiveClient(final String catalogName,
@Nullable final IMetacatHMSHandler handler,
final Registry registry) {
this.handler = handler;
this.registry = registry;
this.requestTimerId = registry.createId(HiveMetrics.TimerHiveRequest.getMetricName());
this.hiveSqlErrorCounter =
registry.counter(HiveMetrics.CounterHiveSqlLockError.getMetricName() + "." + catalogName);
}
@Override
public void shutdown() throws TException {
handler.shutdown();
}
private void handleSqlException(final TException ex) {
if ((ex.getCause() instanceof SQLException || ex.getMessage().startsWith(EXCEPTION_JDO_PREFIX)
|| ex.getMessage().contains(EXCEPTION_SQL_PREFIX))
&& ex.getMessage().contains(EX_MESSAGE_RESTART_TRANSACTION)) {
this.hiveSqlErrorCounter.increment();
}
}
/**
* {@inheritDoc}.
*/
@Override
public void createDatabase(final Database database) throws TException {
callWrap(HiveMetrics.TagCreateDatabase.getMetricName(), () -> {
handler.create_database(database);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void createTable(final Table table) throws TException {
callWrap(HiveMetrics.TagCreateTable.getMetricName(), () -> {
handler.create_table(table);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void dropDatabase(final String dbName) throws TException {
callWrap(HiveMetrics.TagDropDatabase.getMetricName(), () -> {
handler.drop_database(dbName, false, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void dropPartitions(final String databaseName,
final String tableName,
final List<String> partitionNames) throws
TException {
dropHivePartitions(databaseName, tableName, partitionNames);
}
private void dropHivePartitions(final String dbName, final String tableName,
final List<String> partitionNames)
throws TException {
callWrap(HiveMetrics.TagDropHivePartitions.getMetricName(), () -> {
final List<List<String>> dropParts = new ArrayList<>();
for (String partName : partitionNames) {
dropParts.add(new ArrayList<>(PartitionUtil.getPartitionKeyValues(partName).values()));
}
handler.add_drop_partitions(dbName, tableName, Lists.newArrayList(), dropParts, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void alterDatabase(final String databaseName,
final Database database) throws TException {
callWrap(HiveMetrics.TagAlterDatabase.getMetricName(), () -> {
handler.alter_database(databaseName, database);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllDatabases() throws TException {
return callWrap(HiveMetrics.TagGetAllDatabases.getMetricName(), handler::get_all_databases);
}
/**
* {@inheritDoc}.
*/
@Override
public Set<HivePrivilege> getDatabasePrivileges(final String user, final String databaseName) {
return DEFAULT_PRIVILEGES;
}
/**
* {@inheritDoc}.
*/
@Override
public Database getDatabase(final String databaseName) throws TException {
return callWrap(HiveMetrics.TagGetDatabase.getMetricName(), () -> handler.get_database(databaseName));
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getAllTables(final String databaseName) throws TException {
return callWrap(HiveMetrics.TagGetAllTables.getMetricName(), () -> {
final List<String> tables = handler.get_all_tables(databaseName);
if (tables.isEmpty()) {
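// No tables returned: probe the database so a nonexistent database
// surfaces as an error instead of an empty list.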
handler.get_database(databaseName);
}
return tables;
});
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getTableNames(final String databaseName, final String filter, final int limit)
throws TException {
return callWrap(HiveMetrics.TagGetTableNamesByFilter.getMetricName(),
() -> handler.get_table_names_by_filter(databaseName, filter, (short) limit));
}
/**
* {@inheritDoc}.
*/
@Override
public Table getTableByName(final String databaseName,
final String tableName) throws TException {
return callWrap(HiveMetrics.TagGetTableByName.getMetricName(), () -> loadTable(databaseName, tableName));
}
private Table loadTable(final String dbName, final String tableName) throws TException {
return callWrap(HiveMetrics.TagLoadTable.getMetricName(), () -> handler.get_table(dbName, tableName));
}
/**
* {@inheritDoc}.
*/
@Override
public void alterTable(final String databaseName,
final String tableName,
final Table table) throws TException {
callWrap(HiveMetrics.TagAlterTable.getMetricName(), () -> {
handler.alter_table(databaseName, tableName, table);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void alterPartitions(final String dbName,
final String tableName,
final List<Partition> partitions) throws TException {
callWrap(HiveMetrics.TagAlterPartitions.getMetricName(), () -> {
handler.alter_partitions(dbName, tableName, partitions);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void addDropPartitions(final String dbName,
final String tableName,
final List<Partition> addParts,
final List<String> dropPartNames) throws TException {
callWrap(HiveMetrics.TagAddDropPartitions.getMetricName(), () -> {
final List<List<String>> dropParts = new ArrayList<>();
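            // Same partition-name-to-values conversion as in dropHivePartitions above.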
for (String partName : dropPartNames) {
dropParts.add(new ArrayList<>(PartitionUtil.getPartitionKeyValues(partName).values()));
}
handler.add_drop_partitions(dbName, tableName, addParts, dropParts, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
public void dropTable(final String databaseName,
final String tableName) throws TException {
callWrap(HiveMetrics.TagDropTable.getMetricName(), () -> {
handler.drop_table(databaseName, tableName, false);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public void rename(final String databaseName,
final String oldTableName,
                       final String newDatabaseName,
final String newTableName) throws TException {
callWrap(HiveMetrics.TagRename.getMetricName(), () -> {
final Table table = new Table(loadTable(databaseName, oldTableName));
            table.setDbName(newDatabaseName);
table.setTableName(newTableName);
handler.alter_table(databaseName, oldTableName, table);
return null;
});
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public List<Partition> getPartitions(final String databaseName,
final String tableName,
@Nullable final List<String> partitionNames) throws TException {
return callWrap(HiveMetrics.TagGetPartitions.getMetricName(), () -> {
if (partitionNames != null && !partitionNames.isEmpty()) {
return handler.get_partitions_by_names(databaseName, tableName, partitionNames);
}
return handler.get_partitions(databaseName, tableName, ALL_RESULTS);
});
}
/**
* {@inheritDoc}.
*/
@Override
public int getPartitionCount(final String databaseName,
final String tableName) throws TException {
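        // Counts by materializing the full partition list, which can be
        // expensive for heavily partitioned tables.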
return callWrap(HiveMetrics.TagGetPartitionCount.getMetricName(),
() -> getPartitions(databaseName, tableName, null).size());
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public List<String> getPartitionNames(final String databaseName,
final String tableName)
throws TException {
return callWrap(HiveMetrics.TagGetPartitionNames.getMetricName(),
() -> handler.get_partition_names(databaseName, tableName, ALL_RESULTS));
}
/**
* {@inheritDoc}.
*/
@Override
@SuppressWarnings("unchecked")
public List<Partition> listPartitionsByFilter(final String databaseName,
final String tableName,
final String filter
) throws TException {
return callWrap(HiveMetrics.TagListPartitionsByFilter.getMetricName(),
() -> handler.get_partitions_by_filter(databaseName, tableName, filter, ALL_RESULTS));
}
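    /**
     * Executes a metastore call, recording a per-request timer metric and
     * translating datastore failures into {@link ConnectorException}s.
     *
     * @param requestName the metric tag identifying the request
     * @param supplier    the metastore call to execute
     * @param <R>         the result type of the call
     * @return the result of the call
     * @throws TException if the underlying metastore call fails
     */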
private <R> R callWrap(final String requestName, final Callable<R> supplier) throws TException {
final long start = registry.clock().wallTime();
        final Map<String, String> tags = new HashMap<>();
tags.put("request", requestName);
try {
return supplier.call();
} catch (MetaException e) {
handleSqlException(e);
if (e.getCause() instanceof NucleusDataStoreException) {
throw new ConnectorException(e.getMessage(), e.getCause());
}
throw e;
} catch (TException e) {
handleSqlException(e);
throw e;
} catch (Exception e) {
throw new TException(e.getMessage(), e.getCause());
} finally {
final long duration = registry.clock().wallTime() - start;
log.debug("### Time taken to complete {} is {} ms", requestName,
duration);
this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
}
}
}
| 1,593 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/HivePersistenceManagerFactory.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.embedded;
import com.google.common.collect.Maps;
import com.netflix.metacat.common.server.util.DataSourceManager;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import lombok.extern.slf4j.Slf4j;
import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
import javax.jdo.PersistenceManagerFactory;
import javax.sql.DataSource;
import java.util.Map;
/**
* HivePersistenceManagerFactory.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public final class HivePersistenceManagerFactory {
private static Map<String, PersistenceManagerFactory> factories = Maps.newConcurrentMap();
private HivePersistenceManagerFactory() {
}
/**
 * Returns the cached {@link PersistenceManagerFactory} for the named data source,
 * creating it on first use.
*
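 * <p>Illustrative usage (a sketch; the data source name is an assumption):
 * <pre>{@code
 * Map<String, Object> props = new HashMap<>();
 * props.put(HiveConfigConstants.JAVAX_JDO_OPTION_NAME, "hivemetastore");
 * PersistenceManagerFactory pmf =
 *     HivePersistenceManagerFactory.getPersistenceManagerFactory(props);
 * }</pre>
 *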
 * @param props JDO configuration properties, keyed by {@code HiveConfigConstants} names
* @return PersistenceManagerFactory
*/
public static PersistenceManagerFactory getPersistenceManagerFactory(final Map props) {
final String name = String.valueOf(props.get(HiveConfigConstants.JAVAX_JDO_OPTION_NAME));
PersistenceManagerFactory result = factories.get(name);
if (result == null) {
            result = createPersistenceManagerFactory(props);
}
return result;
}
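    // Creation is synchronized and re-checks the cache so that concurrent
    // callers racing past the unsynchronized lookup above build at most one
    // factory per data source name.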
    private static synchronized PersistenceManagerFactory createPersistenceManagerFactory(
        final Map props) {
final String name = String.valueOf(props.get(HiveConfigConstants.JAVAX_JDO_OPTION_NAME));
PersistenceManagerFactory result = factories.get(name);
if (result == null) {
final DataSource dataSource = DataSourceManager.get().get(name);
final Map<String, Object> properties = Maps.newHashMap();
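            // Configure DataNucleus for a fixed, pre-existing schema backed by
            // the shared DataSource: schema auto-creation, validation and the
            // level-2 cache are all disabled.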
properties.put(HiveConfigConstants.DATANUCLEUS_FIXEDDATASTORE,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_FIXEDDATASTORE, true));
properties.put(HiveConfigConstants.DATANUCLEUS_AUTOCREATESCHEMA,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_AUTOCREATESCHEMA, false));
properties.put(HiveConfigConstants.DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_RDBMS_CHECKEXISTTABLESORVIEWS, false));
properties.put(HiveConfigConstants.DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO,
props.getOrDefault(HiveConfigConstants.DATANUCLEUS_RDBMS_INITIALIZECOULUMNINFO, "None"));
properties.put(HiveConfigConstants.DATANUCLEUS_IDENTIFIERFACTORY,
HiveConfigConstants.DATANUCLEUS_DATANUCLEU1);
properties.put(HiveConfigConstants.DATANUCLEUS_CONNECTIONFACTORY, dataSource);
properties.put(HiveConfigConstants.DATANUCLEUS_RDBMS_USELEGACYNATIVEVALUESTRATEGY, true);
properties.put(HiveConfigConstants.DATANUCLEUS_TRANSACTIONISOLATION,
HiveConfigConstants.DATANUCLEUS_READCOMMITTED);
properties.put(HiveConfigConstants.DATANUCLEUS_VALIDATETABLE, false);
properties.put(HiveConfigConstants.DATANUCLEUS_VALIDATECONSTRAINTS, false);
properties.put(HiveConfigConstants.DATANUCLEUS_VALIDATECOLUMNS, false);
properties.put(HiveConfigConstants.DATANUCLEUS_CACHE_LEVEL2, false);
properties.put(HiveConfigConstants.DATANUCLEUS_CACHE_LEVEL2_TYPE, "none");
properties.put(HiveConfigConstants.DATANUCLEUS_PERSISTENCYBYREACHATCOMMIT, false);
properties.put(HiveConfigConstants.DATANUCLEUS_AUTOSTARTMECHANISMMODE, "Checked");
properties.put(HiveConfigConstants.DATANUCLEUS_DETACHALLONCOMMIT, true);
properties.put(HiveConfigConstants.DATANUCLEUS_DETACHALLONROLLBACK, true);
properties.put(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT,
props.get(HiveConfigConstants.JAVAX_JDO_DATASTORETIMEOUT));
properties.put(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT,
props.get(HiveConfigConstants.JAVAX_JDO_DATASTOREREADTIMEOUT));
properties.put(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT,
props.get(HiveConfigConstants.JAVAX_JDO_DATASTOREWRITETIMEOUT));
result = JDOPersistenceManagerFactory.getPersistenceManagerFactory(properties);
factories.put(name, result);
}
return result;
}
}
| 1,594 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/package-info.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Embedded hive metastore client implementation.
* @author zhenl
* @since 1.1.0
*/
@ParametersAreNonnullByDefault
package com.netflix.metacat.connector.hive.client.embedded;
import javax.annotation.ParametersAreNonnullByDefault;
| 1,595 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/client/embedded/HivePrivilege.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.client.embedded;
/**
* HivePrivilege.
*
* @author zhenl
* @since 1.0.0
*/
public enum HivePrivilege {
/**SELECT.*/
SELECT,
/**INSERT.*/
INSERT,
/**UPDATE.*/
UPDATE,
/**DELETE.*/
DELETE,
/**OWNERSHIP.*/
OWNERSHIP,
/**GRANT.*/
    GRANT
}
| 1,596 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/HMSHandlerProxy.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import com.google.common.base.Throwables;
import com.netflix.metacat.connector.hive.util.HiveConfigConstants;
import com.netflix.spectator.api.Registry;
import lombok.NoArgsConstructor;
import lombok.Setter;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Deadline;
import org.apache.hadoop.hive.metastore.api.MetaException;
import javax.jdo.JDODataStoreException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.concurrent.TimeUnit;
/**
* HMSHandlerProxy.
*
* @author zhenl
* @since 1.0.0
*/
@NoArgsConstructor
public final class HMSHandlerProxy implements InvocationHandler {
@Setter
private MetacatHMSHandler metacatHMSHandler;
    private long timeout = 600000; // default 600s; overridden from the Hive conf in the constructor
private HMSHandlerProxy(final HiveConf hiveConf, final Registry registry) throws MetaException {
metacatHMSHandler =
new MetacatHMSHandler(HiveConfigConstants.HIVE_HMSHANDLER_NAME, hiveConf, registry, false);
metacatHMSHandler.init();
timeout = HiveConf.getTimeVar(hiveConf,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
}
/**
 * Creates an initialized {@link MetacatHMSHandler} wrapped in a dynamic proxy.
*
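 * <p>Illustrative usage (a sketch; assumes a configured {@link HiveConf}
 * and Spectator {@link Registry}):
 * <pre>{@code
 * IMetacatHMSHandler handler = HMSHandlerProxy.getProxy(hiveConf, registry);
 * }</pre>
 *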
* @param hiveConf hive configuration
* @param registry registry
 * @return a dynamic proxy implementing {@link IMetacatHMSHandler}
 * @throws Exception if the handler cannot be initialized
*/
public static IMetacatHMSHandler getProxy(final HiveConf hiveConf, final Registry registry)
throws Exception {
final HMSHandlerProxy handler = new HMSHandlerProxy(hiveConf, registry);
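        // A JDK dynamic proxy routes every IMetacatHMSHandler call through
        // invoke() below for deadline management and exception unwrapping.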
return (IMetacatHMSHandler) Proxy.newProxyInstance(
HMSHandlerProxy.class.getClassLoader(),
new Class[]{IMetacatHMSHandler.class}, handler);
}
@Override
public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
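        // Enforce the configured metastore socket timeout on every call.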
Deadline.registerIfNot(timeout);
try {
Deadline.startTimer(method.getName());
final Object object = method.invoke(metacatHMSHandler, args);
Deadline.stopTimer();
return object;
} catch (InvocationTargetException e) {
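            // Surface the underlying datastore failure directly so callers can
            // distinguish connection-level errors from generic wrappers.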
for (Throwable ex : Throwables.getCausalChain(e)) {
if (ex instanceof JDODataStoreException) {
throw ex;
}
}
throw e.getCause();
}
}
}
| 1,597 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/IMetacatHMSHandler.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import java.util.List;
/**
* IMetacatHMSHandler.
* @author zhenl
* @since 1.0.0
*/
public interface IMetacatHMSHandler extends IHMSHandler {
/**
* Adds and drops partitions in one transaction.
*
* @param databaseName database name
* @param tableName table name
* @param addParts list of partitions
* @param dropParts list of partition values
* @param deleteData if true, deletes the data
* @return true if successful
* @throws TException any internal exception
*/
@SuppressWarnings({"checkstyle:methodname"})
boolean add_drop_partitions(String databaseName,
String tableName, List<Partition> addParts,
List<List<String>> dropParts, boolean deleteData)
throws TException;
}
| 1,598 |
0 |
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive
|
Create_ds/metacat/metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/metastore/HiveMetaStoreFsImpl.java
|
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive.metastore;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.MetaStoreFS;
import org.apache.hadoop.hive.metastore.api.MetaException;
/**
* HiveMetaStoreFsImpl.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
public class HiveMetaStoreFsImpl implements MetaStoreFS {
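    /**
     * No-op implementation: the directory is never deleted and success is
     * reported so that metastore drop operations can proceed; data removal is
     * left to the caller.
     */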
@Override
    public boolean deleteDir(final FileSystem fileSystem, final Path path,
                             final boolean recursive, final boolean ifPurge,
                             final Configuration conf)
        throws MetaException {
log.info("No-op call for deleting '{}'", path);
return true;
}
}
| 1,599 |