index (int64, 0-0) | repo_id (string, 9-205 chars) | file_path (string, 31-246 chars) | content (string, 1-12.2M chars) | __index_level_0__ (int64, 0-10k)
---|---|---|---|---
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/BinaryDecoder.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* Defines common decoding methods for byte array decoders.
*
* @author Apache Software Foundation
* @version $Id: BinaryDecoder.java 1157192 2011-08-12 17:27:38Z ggregory $
*/
public interface BinaryDecoder extends Decoder {
/**
* Decodes a byte array and returns the results as a byte array.
*
* @param source A byte array which has been encoded with the
* appropriate encoder
*
* @return a byte array that contains decoded content
*
* @throws DecoderException A decoder exception is thrown
* if a Decoder encounters a failure condition during
* the decode process.
*/
byte[] decode(byte[] source) throws DecoderException;
}
| 3,900 |
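A minimal usage sketch (not part of the SDK sources): it assumes the `Base64` class that appears later in this dataset, which implements `BinaryDecoder` through `BaseNCodec`.

```java
import com.paypal.base.codec.BinaryDecoder;
import com.paypal.base.codec.DecoderException;
import com.paypal.base.codec.binary.Base64;

public class BinaryDecoderSketch {
    public static void main(String[] args) throws DecoderException {
        // Base64 implements BinaryDecoder, so it can be driven through the interface.
        BinaryDecoder decoder = new Base64();
        byte[] encoded = "SGVsbG8=".getBytes();   // ASCII bytes of a Base64 string
        byte[] decoded = decoder.decode(encoded); // yields the bytes of "Hello"
        System.out.println(new String(decoded));
    }
}
```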
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/DecoderException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* Thrown when there is a failure condition during the decoding process. This exception is thrown when a {@link Decoder}
* encounters a decoding-specific exception such as invalid data or characters outside of the expected range.
*
* @author Apache Software Foundation
* @version $Id: DecoderException.java 1157192 2011-08-12 17:27:38Z ggregory $
*/
public class DecoderException extends Exception {
/**
* Declares the Serial Version Uid.
*
* @see <a href="http://c2.com/cgi/wiki?AlwaysDeclareSerialVersionUid">Always Declare Serial Version Uid</a>
*/
private static final long serialVersionUID = 1L;
/**
* Constructs a new exception with <code>null</code> as its detail message. The cause is not initialized, and may
* subsequently be initialized by a call to {@link #initCause}.
*
* @since 1.4
*/
public DecoderException() {
super();
}
/**
* Constructs a new exception with the specified detail message. The cause is not initialized, and may subsequently
* be initialized by a call to {@link #initCause}.
*
* @param message
* The detail message which is saved for later retrieval by the {@link #getMessage()} method.
*/
public DecoderException(String message) {
super(message);
}
/**
* Constructs a new exception with the specified detail message and cause.
*
* <p>
* Note that the detail message associated with <code>cause</code> is not automatically incorporated into this
* exception's detail message.
* </p>
*
* @param message
* The detail message which is saved for later retrieval by the {@link #getMessage()} method.
* @param cause
* The cause which is saved for later retrieval by the {@link #getCause()} method. A <code>null</code>
* value is permitted, and indicates that the cause is nonexistent or unknown.
* @since 1.4
*/
public DecoderException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a new exception with the specified cause and a detail message of <code>(cause==null ?
* null : cause.toString())</code> (which typically contains the class and detail message of <code>cause</code>).
* This constructor is useful for exceptions that are little more than wrappers for other throwables.
*
* @param cause
* The cause which is saved for later retrieval by the {@link #getCause()} method. A <code>null</code>
* value is permitted, and indicates that the cause is nonexistent or unknown.
* @since 1.4
*/
public DecoderException(Throwable cause) {
super(cause);
}
}
| 3,901 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/Decoder.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* <p>Provides the highest level of abstraction for Decoders.
* This is the sister interface of {@link Encoder}. All
* Decoders implement this common generic interface.</p>
*
* <p>Allows a user to pass a generic Object to any Decoder
* implementation in the codec package.</p>
*
* <p>One of the two interfaces at the center of the codec package.</p>
*
* @author Apache Software Foundation
* @version $Id: Decoder.java 1157192 2011-08-12 17:27:38Z ggregory $
*/
public interface Decoder {
/**
* Decodes an "encoded" Object and returns a "decoded"
* Object. Note that the implementation of this
* interface will try to cast the Object parameter
* to the specific type expected by a particular Decoder
* implementation. If a {@link ClassCastException} occurs
* this decode method will throw a DecoderException.
*
* @param source the object to decode
*
* @return a "decoded" Object
*
* @throws DecoderException a decoder exception can
* be thrown for any number of reasons. Common
* causes are that the parameter passed to this
* method is null, or that it cannot be cast to the
* type expected by a specific Decoder implementation.
*/
Object decode(Object source) throws DecoderException;
}
| 3,902 |
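As described in the Javadoc above, implementations typically cast the Object parameter and convert a ClassCastException into a DecoderException. A hedged sketch of that pattern (the class and its trivial reversal logic are illustrative, not from the SDK):

```java
import com.paypal.base.codec.Decoder;
import com.paypal.base.codec.DecoderException;

// Illustrative Decoder that simply reverses a byte array.
public class ReversingDecoder implements Decoder {
    public byte[] decode(byte[] source) {
        byte[] out = new byte[source.length];
        for (int i = 0; i < source.length; i++) {
            out[i] = source[source.length - 1 - i];
        }
        return out;
    }

    // The generic entry point narrows the Object to the expected type,
    // rethrowing a failed cast as a DecoderException with the original cause.
    public Object decode(Object source) throws DecoderException {
        try {
            return decode((byte[]) source);
        } catch (ClassCastException e) {
            throw new DecoderException("Expected a byte[] but got: " + source, e);
        }
    }
}
```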
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/EncoderException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* Thrown when there is a failure condition during the encoding process. This exception is thrown when an
* {@link Encoder} encounters an encoding-specific exception such as invalid data, an inability to calculate a checksum,
* or characters outside of the expected range.
*
* @author Apache Software Foundation
* @version $Id: EncoderException.java 1157192 2011-08-12 17:27:38Z ggregory $
*/
public class EncoderException extends Exception {
/**
* Declares the Serial Version Uid.
*
* @see <a href="http://c2.com/cgi/wiki?AlwaysDeclareSerialVersionUid">Always Declare Serial Version Uid</a>
*/
private static final long serialVersionUID = 1L;
/**
* Constructs a new exception with <code>null</code> as its detail message. The cause is not initialized, and may
* subsequently be initialized by a call to {@link #initCause}.
*
* @since 1.4
*/
public EncoderException() {
super();
}
/**
* Constructs a new exception with the specified detail message. The cause is not initialized, and may subsequently
* be initialized by a call to {@link #initCause}.
*
* @param message
* a useful message relating to the encoder specific error.
*/
public EncoderException(String message) {
super(message);
}
/**
* Constructs a new exception with the specified detail message and cause.
*
* <p>
* Note that the detail message associated with <code>cause</code> is not automatically incorporated into this
* exception's detail message.
* </p>
*
* @param message
* The detail message which is saved for later retrieval by the {@link #getMessage()} method.
* @param cause
* The cause which is saved for later retrieval by the {@link #getCause()} method. A <code>null</code>
* value is permitted, and indicates that the cause is nonexistent or unknown.
* @since 1.4
*/
public EncoderException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a new exception with the specified cause and a detail message of <code>(cause==null ?
* null : cause.toString())</code> (which typically contains the class and detail message of <code>cause</code>).
* This constructor is useful for exceptions that are little more than wrappers for other throwables.
*
* @param cause
* The cause which is saved for later retrieval by the {@link #getCause()} method. A <code>null</code>
* value is permitted, and indicates that the cause is nonexistent or unknown.
* @since 1.4
*/
public EncoderException(Throwable cause) {
super(cause);
}
}
| 3,903 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/CharEncoding.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* Character encoding names required of every implementation of the Java platform.
*
* From the Java documentation <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard
* charsets</a>:
* <p>
* <cite>Every implementation of the Java platform is required to support the following character encodings. Consult the
* release documentation for your implementation to see if any other encodings are supported.</cite>
* </p>
*
* <ul>
* <li><code>US-ASCII</code><br/>
* Seven-bit ASCII, a.k.a. ISO646-US, a.k.a. the Basic Latin block of the Unicode character set.</li>
* <li><code>ISO-8859-1</code><br/>
* ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.</li>
* <li><code>UTF-8</code><br/>
* Eight-bit Unicode Transformation Format.</li>
* <li><code>UTF-16BE</code><br/>
* Sixteen-bit Unicode Transformation Format, big-endian byte order.</li>
* <li><code>UTF-16LE</code><br/>
* Sixteen-bit Unicode Transformation Format, little-endian byte order.</li>
* <li><code>UTF-16</code><br/>
* Sixteen-bit Unicode Transformation Format, byte order specified by a mandatory initial byte-order mark (either order
* accepted on input, big-endian used on output.)</li>
* </ul>
*
* This perhaps would best belong in the [lang] project. Even if a similar interface is defined in [lang], it is not
* foreseen that [codec] would be made to depend on [lang].
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @author Apache Software Foundation
* @since 1.4
* @version $Id: CharEncoding.java 1170351 2011-09-13 21:09:09Z ggregory $
*/
public class CharEncoding {
/**
* <p>ISO Latin Alphabet No. 1, a.k.a. ISO-LATIN-1.</p>
* <p>
* Every implementation of the Java platform is required to support this character encoding.
* </p>
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*/
public static final String ISO_8859_1 = "ISO-8859-1";
/**
* <p>
* Seven-bit ASCII, also known as ISO646-US, also known as the Basic Latin block of the Unicode character set.
* </p>
* <p>
* Every implementation of the Java platform is required to support this character encoding.
* </p>
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*/
public static final String US_ASCII = "US-ASCII";
/**
* <p>
* Sixteen-bit Unicode Transformation Format; the byte order is specified by a mandatory initial byte-order mark
* (either order accepted on input, big-endian used on output).
* </p>
* <p>
* Every implementation of the Java platform is required to support this character encoding.
* </p>
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*/
public static final String UTF_16 = "UTF-16";
/**
* <p>
* Sixteen-bit Unicode Transformation Format, big-endian byte order.
* </p>
* <p>
* Every implementation of the Java platform is required to support this character encoding.
* </p>
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*/
public static final String UTF_16BE = "UTF-16BE";
/**
* <p>
* Sixteen-bit Unicode Transformation Format, little-endian byte order.
* </p>
* <p>
* Every implementation of the Java platform is required to support this character encoding.
* </p>
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*/
public static final String UTF_16LE = "UTF-16LE";
/**
* <p>
* Eight-bit Unicode Transformation Format.
* </p>
* <p>
* Every implementation of the Java platform is required to support this character encoding.
* </p>
*
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*/
public static final String UTF_8 = "UTF-8";
}
| 3,904 |
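A small usage sketch (not from the SDK): the constants are plain charset names, suitable for APIs such as String.getBytes(String).

```java
import com.paypal.base.codec.CharEncoding;
import java.io.UnsupportedEncodingException;

public class CharEncodingSketch {
    public static void main(String[] args) throws UnsupportedEncodingException {
        String text = "caf\u00e9";
        // Every Java platform is required to support these charsets, so the lookups cannot fail.
        byte[] utf8 = text.getBytes(CharEncoding.UTF_8);        // 5 bytes: the accented char takes two
        byte[] latin1 = text.getBytes(CharEncoding.ISO_8859_1); // 4 bytes: it fits in one byte
        System.out.println(utf8.length + " vs " + latin1.length);
    }
}
```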
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/BinaryEncoder.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* Defines common encoding methods for byte array encoders.
*
* @author Apache Software Foundation
* @version $Id: BinaryEncoder.java 1157192 2011-08-12 17:27:38Z ggregory $
*/
public interface BinaryEncoder extends Encoder {
/**
* Encodes a byte array and returns the encoded data
* as a byte array.
*
* @param source Data to be encoded
*
* @return A byte array containing the encoded data
*
* @throws EncoderException thrown if the Encoder
* encounters a failure condition during the
* encoding process.
*/
byte[] encode(byte[] source) throws EncoderException;
}
| 3,905 |
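A hedged round-trip sketch pairing BinaryEncoder with BinaryDecoder, again using the Base64 class from later in this dataset:

```java
import com.paypal.base.codec.BinaryDecoder;
import com.paypal.base.codec.BinaryEncoder;
import com.paypal.base.codec.DecoderException;
import com.paypal.base.codec.EncoderException;
import com.paypal.base.codec.binary.Base64;

public class BinaryEncoderSketch {
    public static void main(String[] args) throws EncoderException, DecoderException {
        byte[] original = {0x00, 0x10, 0x7f, (byte) 0xff};
        BinaryEncoder encoder = new Base64();
        BinaryDecoder decoder = new Base64();
        byte[] encoded = encoder.encode(original);   // Base64 text as ASCII bytes
        byte[] roundTrip = decoder.decode(encoded);  // back to the original bytes
        System.out.println(java.util.Arrays.equals(original, roundTrip)); // true
    }
}
```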
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/Encoder.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec;
/**
* <p>Provides the highest level of abstraction for Encoders.
* This is the sister interface of {@link Decoder}. Every implementation of
* Encoder provides this common generic interface which allows a user to pass a
* generic Object to any Encoder implementation in the codec package.</p>
*
* @author Apache Software Foundation
* @version $Id: Encoder.java 1170351 2011-09-13 21:09:09Z ggregory $
*/
public interface Encoder {
/**
* Encodes an "Object" and returns the encoded content
* as an Object. The Objects here may just be <code>byte[]</code>
* or <code>String</code>s depending on the implementation used.
*
* @param source An object to encode
*
* @return An "encoded" Object
*
* @throws EncoderException an encoder exception is
* thrown if the encoder experiences a failure
* condition during the encoding process.
*/
Object encode(Object source) throws EncoderException;
}
| 3,906 |
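The Object-based entry point can be illustrated with a hedged sketch; it assumes BaseNCodec supplies encode(Object) by delegating to the byte[] overload and rejecting other types, as in Apache Commons Codec:

```java
import com.paypal.base.codec.Encoder;
import com.paypal.base.codec.EncoderException;
import com.paypal.base.codec.binary.Base64;

public class EncoderSketch {
    public static void main(String[] args) throws EncoderException {
        Encoder encoder = new Base64();
        // For Base64 the expected input type is byte[]; other types are assumed to raise an EncoderException.
        Object result = encoder.encode((Object) new byte[] {1, 2, 3});
        System.out.println(new String((byte[]) result)); // prints "AQID"
    }
}
```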
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/binary/StringUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec.binary;
import com.paypal.base.codec.CharEncoding;
import java.io.UnsupportedEncodingException;
/**
* Converts String to and from bytes using the encodings required by the Java specification. These encodings are specified in <a
* href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
*
* @see CharEncoding
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @author <a href="mailto:[email protected]">Gary Gregory</a>
* @version $Id: StringUtils.java 1170351 2011-09-13 21:09:09Z ggregory $
* @since 1.4
*/
public class StringUtils {
/**
* Encodes the given string into a sequence of bytes using the ISO-8859-1 charset, storing the result into a new
* byte array.
*
* @param string
* the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when the charset is missing, which should never happen according to the Java specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesIso8859_1(String string) {
return StringUtils.getBytesUnchecked(string, CharEncoding.ISO_8859_1);
}
/**
* Encodes the given string into a sequence of bytes using the US-ASCII charset, storing the result into a new byte
* array.
*
* @param string
* the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when the charset is missing, which should never happen according to the Java specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUsAscii(String string) {
return StringUtils.getBytesUnchecked(string, CharEncoding.US_ASCII);
}
/**
* Encodes the given string into a sequence of bytes using the UTF-16 charset, storing the result into a new byte
* array.
*
* @param string
* the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when the charset is missing, which should never happen according to the Java specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUtf16(String string) {
return StringUtils.getBytesUnchecked(string, CharEncoding.UTF_16);
}
/**
* Encodes the given string into a sequence of bytes using the UTF-16BE charset, storing the result into a new byte
* array.
*
* @param string
* the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when the charset is missing, which should never happen according to the Java specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUtf16Be(String string) {
return StringUtils.getBytesUnchecked(string, CharEncoding.UTF_16BE);
}
/**
* Encodes the given string into a sequence of bytes using the UTF-16LE charset, storing the result into a new byte
* array.
*
* @param string
* the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when the charset is missing, which should never happen according to the Java specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUtf16Le(String string) {
return StringUtils.getBytesUnchecked(string, CharEncoding.UTF_16LE);
}
/**
* Encodes the given string into a sequence of bytes using the UTF-8 charset, storing the result into a new byte
* array.
*
* @param string
* the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when the charset is missing, which should never happen according to the Java specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUtf8(String string) {
return StringUtils.getBytesUnchecked(string, CharEncoding.UTF_8);
}
/**
* Encodes the given string into a sequence of bytes using the named charset, storing the result into a new byte
* array.
* <p>
* This method catches {@link UnsupportedEncodingException} and rethrows it as {@link IllegalStateException}, which
* should never happen for a required charset name. Use this method when the encoding is required to be in the JRE.
* </p>
*
* @param string
* the String to encode, may be <code>null</code>
* @param charsetName
* The name of a required {@link java.nio.charset.Charset}
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen for a
* required charset name.
* @see CharEncoding
* @see String#getBytes(String)
*/
public static byte[] getBytesUnchecked(String string, String charsetName) {
if (string == null) {
return null;
}
try {
return string.getBytes(charsetName);
} catch (UnsupportedEncodingException e) {
throw StringUtils.newIllegalStateException(charsetName, e);
}
}
private static IllegalStateException newIllegalStateException(String charsetName, UnsupportedEncodingException e) {
return new IllegalStateException(charsetName + ": " + e);
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the given charset.
* <p>
* This method catches {@link UnsupportedEncodingException} and re-throws it as {@link IllegalStateException}, which
* should never happen for a required charset name. Use this method when the encoding is required to be in the JRE.
* </p>
*
* @param bytes
* The bytes to be decoded into characters, may be <code>null</code>
* @param charsetName
* The name of a required {@link java.nio.charset.Charset}
* @return A new <code>String</code> decoded from the specified array of bytes using the given charset,
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen for a
* required charset name.
* @see CharEncoding
* @see String#String(byte[], String)
*/
public static String newString(byte[] bytes, String charsetName) {
if (bytes == null) {
return null;
}
try {
return new String(bytes, charsetName);
} catch (UnsupportedEncodingException e) {
throw StringUtils.newIllegalStateException(charsetName, e);
}
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the ISO-8859-1 charset.
*
* @param bytes
* The bytes to be decoded into characters, may be <code>null</code>
* @return A new <code>String</code> decoded from the specified array of bytes using the ISO-8859-1 charset,
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen since the
* charset is required.
*/
public static String newStringIso8859_1(byte[] bytes) {
return StringUtils.newString(bytes, CharEncoding.ISO_8859_1);
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the US-ASCII charset.
*
* @param bytes
* The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the US-ASCII charset,
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen since the
* charset is required.
*/
public static String newStringUsAscii(byte[] bytes) {
return StringUtils.newString(bytes, CharEncoding.US_ASCII);
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the UTF-16 charset.
*
* @param bytes
* The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the UTF-16 charset
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen since the
* charset is required.
*/
public static String newStringUtf16(byte[] bytes) {
return StringUtils.newString(bytes, CharEncoding.UTF_16);
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the UTF-16BE charset.
*
* @param bytes
* The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the UTF-16BE charset,
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen since the
* charset is required.
*/
public static String newStringUtf16Be(byte[] bytes) {
return StringUtils.newString(bytes, CharEncoding.UTF_16BE);
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the UTF-16LE charset.
*
* @param bytes
* The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the UTF-16LE charset,
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen since the
* charset is required.
*/
public static String newStringUtf16Le(byte[] bytes) {
return StringUtils.newString(bytes, CharEncoding.UTF_16LE);
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the UTF-8 charset.
*
* @param bytes
* The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the UTF-8 charset,
* or <code>null</code> if the input byte array was <code>null</code>.
* @throws IllegalStateException
* Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen since the
* charset is required.
*/
public static String newStringUtf8(byte[] bytes) {
return StringUtils.newString(bytes, CharEncoding.UTF_8);
}
}
| 3,907 |
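A brief usage sketch (not from the SDK sources): a UTF-8 round trip through the unchecked helpers, including the documented null pass-through.

```java
import com.paypal.base.codec.binary.StringUtils;

public class StringUtilsSketch {
    public static void main(String[] args) {
        // The "unchecked" helpers never declare UnsupportedEncodingException because
        // UTF-8 is guaranteed to be present in every JRE.
        byte[] bytes = StringUtils.getBytesUtf8("PayPal \u2764");
        String back = StringUtils.newStringUtf8(bytes);
        System.out.println(back);                                    // prints the original string
        System.out.println(StringUtils.getBytesUtf8(null) == null);  // null input passes through as null
    }
}
```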
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/binary/Base64.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec.binary;
import java.math.BigInteger;
/**
* Provides Base64 encoding and decoding as defined by <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045</a>.
*
* <p>
* This class implements section <cite>6.8. Base64 Content-Transfer-Encoding</cite> from RFC 2045 <cite>Multipurpose
* Internet Mail Extensions (MIME) Part One: Format of Internet Message Bodies</cite> by Freed and Borenstein.
* </p>
* <p>
* The class can be parameterized in the following manner with various constructors:
* <ul>
* <li>URL-safe mode: Default off.</li>
* <li>Line length: Default 76. Line lengths that aren't multiples of 4 will still essentially end up being multiples of
* 4 in the encoded data.</li>
* <li>Line separator: Default is CRLF ("\r\n")</li>
* </ul>
* </p>
* <p>
* Since this class operates directly on byte streams, and not character streams, it is hard-coded to only encode/decode
* character encodings which are compatible with the lower 127 ASCII characters (ISO-8859-1, Windows-1252, UTF-8, etc.).
* </p>
* <p>
* This class is not thread-safe. Each thread should use its own instance.
* </p>
*
* @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045</a>
* @author Apache Software Foundation
* @since 1.0
* @version $Revision: 1201529 $
*/
public class Base64 extends BaseNCodec {
/**
* BASE64 characters are 6 bits in length.
* They are formed by taking a block of 3 octets to form a 24-bit string,
* which is converted into 4 BASE64 characters.
*/
private static final int BITS_PER_ENCODED_BYTE = 6;
private static final int BYTES_PER_UNENCODED_BLOCK = 3;
private static final int BYTES_PER_ENCODED_BLOCK = 4;
/**
* Chunk separator per RFC 2045 section 2.1.
*
* <p>
* N.B. The next major release may break compatibility and make this field private.
* </p>
*
* @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045 section 2.1</a>
*/
static final byte[] CHUNK_SEPARATOR = {'\r', '\n'};
/**
* This array is a lookup table that translates 6-bit positive integer index values into their "Base64 Alphabet"
* equivalents as specified in Table 1 of RFC 2045.
*
* Thanks to "commons" project in ws.apache.org for this code.
* http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
*/
private static final byte[] STANDARD_ENCODE_TABLE = {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'
};
/**
* This is a copy of the STANDARD_ENCODE_TABLE above, but with + and /
* changed to - and _ to make the encoded Base64 results more URL-SAFE.
* This table is only used when the Base64's mode is set to URL-SAFE.
*/
private static final byte[] URL_SAFE_ENCODE_TABLE = {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_'
};
/**
* This array is a lookup table that translates Unicode characters drawn from the "Base64 Alphabet" (as specified in
* Table 1 of RFC 2045) into their 6-bit positive integer equivalents. Characters that are not in the Base64
* alphabet but fall within the bounds of the array are translated to -1.
*
* Note: '+' and '-' both decode to 62. '/' and '_' both decode to 63. This means decoder seamlessly handles both
* URL_SAFE and STANDARD base64. (The encoder, on the other hand, needs to know ahead of time what to emit).
*
* Thanks to "commons" project in ws.apache.org for this code.
* http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
*/
private static final byte[] DECODE_TABLE = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, 62, -1, 63, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, -1, -1, -1, -1, 63, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
};
/**
* Base64 uses 6-bit fields.
*/
/** Mask used to extract 6 bits, used when encoding */
private static final int MASK_6BITS = 0x3f;
// The static final fields above are used for the original static byte[] methods on Base64.
// The private member fields below are used with the new streaming approach, which requires
// some state be preserved between calls of encode() and decode().
/**
* Encode table to use: either STANDARD or URL_SAFE. Note: the DECODE_TABLE above remains static because it is able
* to decode both STANDARD and URL_SAFE streams, but the encodeTable must be a member variable so we can switch
* between the two modes.
*/
private final byte[] encodeTable;
// Only one decode table currently; keep for consistency with Base32 code
private final byte[] decodeTable = DECODE_TABLE;
/**
* Line separator for encoding. Not used when decoding. Only used if lineLength > 0.
*/
private final byte[] lineSeparator;
/**
* Convenience variable to help us determine when our buffer is going to run out of room and needs resizing.
* <code>decodeSize = 3 + lineSeparator.length;</code>
*/
private final int decodeSize;
/**
* Convenience variable to help us determine when our buffer is going to run out of room and needs resizing.
* <code>encodeSize = 4 + lineSeparator.length;</code>
*/
private final int encodeSize;
/**
* Placeholder for the bytes we're dealing with in our base-N logic.
* Bitwise operations store and extract the encoding or decoding from this variable.
*/
private int bitWorkArea;
/**
* Creates a Base64 codec used for decoding (all modes) and encoding in URL-unsafe mode.
* <p>
* When encoding the line length is 0 (no chunking), and the encoding table is STANDARD_ENCODE_TABLE.
* </p>
*
* <p>
* When decoding all variants are supported.
* </p>
*/
public Base64() {
this(0);
}
/**
* Creates a Base64 codec used for decoding (all modes) and encoding in the given URL-safe mode.
* <p>
* When encoding the line length is 76 and the line separator is CRLF; the encoding table is STANDARD_ENCODE_TABLE
* unless <code>urlSafe</code> is <code>true</code>, in which case URL_SAFE_ENCODE_TABLE is used.
* </p>
*
* <p>
* When decoding all variants are supported.
* </p>
*
* @param urlSafe
* if <code>true</code>, URL-safe encoding is used. In most cases this should be set to
* <code>false</code>.
* @since 1.4
*/
public Base64(boolean urlSafe) {
this(MIME_CHUNK_SIZE, CHUNK_SEPARATOR, urlSafe);
}
/**
* Creates a Base64 codec used for decoding (all modes) and encoding in URL-unsafe mode.
* <p>
* When encoding the line length is given in the constructor, the line separator is CRLF, and the encoding table is
* STANDARD_ENCODE_TABLE.
* </p>
* <p>
* Line lengths that aren't multiples of 4 will still essentially end up being multiples of 4 in the encoded data.
* </p>
* <p>
* When decoding all variants are supported.
* </p>
*
* @param lineLength
* Each line of encoded data will be at most of the given length (rounded down to nearest multiple of 4).
* If lineLength <= 0, then the output will not be divided into lines (chunks). Ignored when decoding.
* @since 1.4
*/
public Base64(int lineLength) {
this(lineLength, CHUNK_SEPARATOR);
}
/**
* Creates a Base64 codec used for decoding (all modes) and encoding in URL-unsafe mode.
* <p>
* When encoding the line length and line separator are given in the constructor, and the encoding table is
* STANDARD_ENCODE_TABLE.
* </p>
* <p>
* Line lengths that aren't multiples of 4 will still essentially end up being multiples of 4 in the encoded data.
* </p>
* <p>
* When decoding all variants are supported.
* </p>
*
* @param lineLength
* Each line of encoded data will be at most of the given length (rounded down to nearest multiple of 4).
* If lineLength <= 0, then the output will not be divided into lines (chunks). Ignored when decoding.
* @param lineSeparator
* Each line of encoded data will end with this sequence of bytes.
* @throws IllegalArgumentException
* Thrown when the provided lineSeparator included some base64 characters.
* @since 1.4
*/
public Base64(int lineLength, byte[] lineSeparator) {
this(lineLength, lineSeparator, false);
}
/**
* Creates a Base64 codec used for decoding (all modes) and encoding in URL-unsafe mode.
* <p>
* When encoding the line length and line separator are given in the constructor, and the encoding table is
* STANDARD_ENCODE_TABLE.
* </p>
* <p>
* Line lengths that aren't multiples of 4 will still essentially end up being multiples of 4 in the encoded data.
* </p>
* <p>
* When decoding all variants are supported.
* </p>
*
* @param lineLength
* Each line of encoded data will be at most of the given length (rounded down to nearest multiple of 4).
* If lineLength <= 0, then the output will not be divided into lines (chunks). Ignored when decoding.
* @param lineSeparator
* Each line of encoded data will end with this sequence of bytes.
* @param urlSafe
* Instead of emitting '+' and '/' we emit '-' and '_' respectively. urlSafe is only applied to encode
* operations. Decoding seamlessly handles both modes.
* @throws IllegalArgumentException
* The provided lineSeparator included some base64 characters. That's not going to work!
* @since 1.4
*/
public Base64(int lineLength, byte[] lineSeparator, boolean urlSafe) {
super(BYTES_PER_UNENCODED_BLOCK, BYTES_PER_ENCODED_BLOCK,
lineLength,
lineSeparator == null ? 0 : lineSeparator.length);
// TODO could be simplified if there is no requirement to reject invalid line sep when length <=0
// @see test case Base64Test.testConstructors()
if (lineSeparator != null) {
if (containsAlphabetOrPad(lineSeparator)) {
String sep = StringUtils.newStringUtf8(lineSeparator);
throw new IllegalArgumentException("lineSeparator must not contain base64 characters: [" + sep + "]");
}
if (lineLength > 0){ // null line-sep forces no chunking rather than throwing IAE
this.encodeSize = BYTES_PER_ENCODED_BLOCK + lineSeparator.length;
this.lineSeparator = new byte[lineSeparator.length];
System.arraycopy(lineSeparator, 0, this.lineSeparator, 0, lineSeparator.length);
} else {
this.encodeSize = BYTES_PER_ENCODED_BLOCK;
this.lineSeparator = null;
}
} else {
this.encodeSize = BYTES_PER_ENCODED_BLOCK;
this.lineSeparator = null;
}
this.decodeSize = this.encodeSize - 1;
this.encodeTable = urlSafe ? URL_SAFE_ENCODE_TABLE : STANDARD_ENCODE_TABLE;
}
/**
* Returns our current encode mode. True if we're URL-SAFE, false otherwise.
*
* @return true if we're in URL-SAFE mode, false otherwise.
* @since 1.4
*/
public boolean isUrlSafe() {
return this.encodeTable == URL_SAFE_ENCODE_TABLE;
}
/**
* <p>
* Encodes all of the provided data, starting at inPos, for inAvail bytes. Must be called at least twice: once with
* the data to encode, and once with inAvail set to "-1" to alert the encoder that EOF has been reached, so it can
* flush the last remaining bytes (if not a multiple of 3).
* </p>
* <p>
* Thanks to "commons" project in ws.apache.org for the bitwise operations, and general approach.
* http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
* </p>
*
* @param in
* byte[] array of binary data to base64 encode.
* @param inPos
* Position to start reading data from.
* @param inAvail
* Amount of bytes available from input for encoding.
*/
@Override
void encode(byte[] in, int inPos, int inAvail) {
if (eof) {
return;
}
// inAvail < 0 is how we're informed of EOF in the underlying data we're
// encoding.
if (inAvail < 0) {
eof = true;
if (0 == modulus && lineLength == 0) {
return; // no leftovers to process and not using chunking
}
ensureBufferSize(encodeSize);
int savedPos = pos;
switch (modulus) { // 0-2
case 1 : // 8 bits = 6 + 2
buffer[pos++] = encodeTable[(bitWorkArea >> 2) & MASK_6BITS]; // top 6 bits
buffer[pos++] = encodeTable[(bitWorkArea << 4) & MASK_6BITS]; // remaining 2
// URL-SAFE skips the padding to further reduce size.
if (encodeTable == STANDARD_ENCODE_TABLE) {
buffer[pos++] = PAD;
buffer[pos++] = PAD;
}
break;
case 2 : // 16 bits = 6 + 6 + 4
buffer[pos++] = encodeTable[(bitWorkArea >> 10) & MASK_6BITS];
buffer[pos++] = encodeTable[(bitWorkArea >> 4) & MASK_6BITS];
buffer[pos++] = encodeTable[(bitWorkArea << 2) & MASK_6BITS];
// URL-SAFE skips the padding to further reduce size.
if (encodeTable == STANDARD_ENCODE_TABLE) {
buffer[pos++] = PAD;
}
break;
}
currentLinePos += pos - savedPos; // keep track of current line position
// if currentLinePos == 0 we are at the start of a line, so don't add CRLF
if (lineLength > 0 && currentLinePos > 0) {
System.arraycopy(lineSeparator, 0, buffer, pos, lineSeparator.length);
pos += lineSeparator.length;
}
} else {
for (int i = 0; i < inAvail; i++) {
ensureBufferSize(encodeSize);
modulus = (modulus+1) % BYTES_PER_UNENCODED_BLOCK;
int b = in[inPos++];
if (b < 0) {
b += 256;
}
bitWorkArea = (bitWorkArea << 8) + b; // BITS_PER_BYTE
if (0 == modulus) { // 3 bytes = 24 bits = 4 * 6 bits to extract
buffer[pos++] = encodeTable[(bitWorkArea >> 18) & MASK_6BITS];
buffer[pos++] = encodeTable[(bitWorkArea >> 12) & MASK_6BITS];
buffer[pos++] = encodeTable[(bitWorkArea >> 6) & MASK_6BITS];
buffer[pos++] = encodeTable[bitWorkArea & MASK_6BITS];
currentLinePos += BYTES_PER_ENCODED_BLOCK;
if (lineLength > 0 && lineLength <= currentLinePos) {
System.arraycopy(lineSeparator, 0, buffer, pos, lineSeparator.length);
pos += lineSeparator.length;
currentLinePos = 0;
}
}
}
}
}
/**
* <p>
* Decodes all of the provided data, starting at inPos, for inAvail bytes. Should be called at least twice: once
* with the data to decode, and once with inAvail set to "-1" to alert decoder that EOF has been reached. The "-1"
* call is not necessary when decoding, but it doesn't hurt, either.
* </p>
* <p>
* Ignores all non-base64 characters. This is how chunked (e.g. 76 character) data is handled, since CR and LF are
* silently ignored, but has implications for other bytes, too. This method subscribes to the garbage-in,
* garbage-out philosophy: it will not check the provided data for validity.
* </p>
* <p>
* Thanks to "commons" project in ws.apache.org for the bitwise operations, and general approach.
* http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
* </p>
*
* @param in
* byte[] array of ascii data to base64 decode.
* @param inPos
* Position to start reading data from.
* @param inAvail
* Amount of bytes available from input for decoding.
*/
@Override
void decode(byte[] in, int inPos, int inAvail) {
if (eof) {
return;
}
if (inAvail < 0) {
eof = true;
}
for (int i = 0; i < inAvail; i++) {
ensureBufferSize(decodeSize);
byte b = in[inPos++];
if (b == PAD) {
// We're done.
eof = true;
break;
} else {
if (b >= 0 && b < DECODE_TABLE.length) {
int result = DECODE_TABLE[b];
if (result >= 0) {
modulus = (modulus+1) % BYTES_PER_ENCODED_BLOCK;
bitWorkArea = (bitWorkArea << BITS_PER_ENCODED_BYTE) + result;
if (modulus == 0) {
buffer[pos++] = (byte) ((bitWorkArea >> 16) & MASK_8BITS);
buffer[pos++] = (byte) ((bitWorkArea >> 8) & MASK_8BITS);
buffer[pos++] = (byte) (bitWorkArea & MASK_8BITS);
}
}
}
}
}
// Two forms of EOF as far as base64 decoder is concerned: actual
// EOF (-1) and first time '=' character is encountered in stream.
// This approach makes the '=' padding characters completely optional.
if (eof && modulus != 0) {
ensureBufferSize(decodeSize);
// We have some spare bits remaining
// Output all whole multiples of 8 bits and ignore the rest
switch (modulus) {
// case 1: // 6 bits - ignore entirely
// break;
case 2 : // 12 bits = 8 + 4
bitWorkArea = bitWorkArea >> 4; // dump the extra 4 bits
buffer[pos++] = (byte) ((bitWorkArea) & MASK_8BITS);
break;
case 3 : // 18 bits = 8 + 8 + 2
bitWorkArea = bitWorkArea >> 2; // dump 2 bits
buffer[pos++] = (byte) ((bitWorkArea >> 8) & MASK_8BITS);
buffer[pos++] = (byte) ((bitWorkArea) & MASK_8BITS);
break;
}
}
}
/**
* Tests a given byte array to see if it contains only valid characters within the Base64 alphabet. Currently the
* method treats whitespace as valid.
*
* @param arrayOctet
* byte array to test
* @return <code>true</code> if all bytes are valid characters in the Base64 alphabet or if the byte array is empty;
* <code>false</code>, otherwise
* @deprecated 1.5 Use {@link #isBase64(byte[])}, will be removed in 2.0.
*/
public static boolean isArrayByteBase64(byte[] arrayOctet) {
return isBase64(arrayOctet);
}
/**
* Returns whether or not the <code>octet</code> is in the base 64 alphabet.
*
* @param octet
* The value to test
* @return <code>true</code> if the value is defined in the Base64 alphabet, <code>false</code> otherwise.
* @since 1.4
*/
public static boolean isBase64(byte octet) {
return octet == PAD_DEFAULT || (octet >= 0 && octet < DECODE_TABLE.length && DECODE_TABLE[octet] != -1);
}
/**
* Tests a given String to see if it contains only valid characters within the Base64 alphabet. Currently the
* method treats whitespace as valid.
*
* @param base64
* String to test
* @return <code>true</code> if all characters in the String are valid characters in the Base64 alphabet or if
* the String is empty; <code>false</code>, otherwise
* @since 1.5
*/
public static boolean isBase64(String base64) {
return isBase64(StringUtils.getBytesUtf8(base64));
}
/**
* Tests a given byte array to see if it contains only valid characters within the Base64 alphabet. Currently the
* method treats whitespace as valid.
*
* @param arrayOctet
* byte array to test
* @return <code>true</code> if all bytes are valid characters in the Base64 alphabet or if the byte array is empty;
* <code>false</code>, otherwise
* @since 1.5
*/
public static boolean isBase64(byte[] arrayOctet) {
for (int i = 0; i < arrayOctet.length; i++) {
if (!isBase64(arrayOctet[i]) && !isWhiteSpace(arrayOctet[i])) {
return false;
}
}
return true;
}
/**
* Encodes binary data using the base64 algorithm but does not chunk the output.
*
* @param binaryData
* binary data to encode
* @return byte[] containing Base64 characters in their UTF-8 representation.
*/
public static byte[] encodeBase64(byte[] binaryData) {
return encodeBase64(binaryData, false);
}
/**
* Encodes binary data using the base64 algorithm but does not chunk the output.
*
* NOTE: We changed the behaviour of this method from multi-line chunking (commons-codec-1.4) to
* single-line non-chunking (commons-codec-1.5).
*
* @param binaryData
* binary data to encode
* @return String containing Base64 characters.
* @since 1.4 (NOTE: 1.4 chunked the output, whereas 1.5 does not).
*/
public static String encodeBase64String(byte[] binaryData) {
return StringUtils.newStringUtf8(encodeBase64(binaryData, false));
}
/**
* Encodes binary data using a URL-safe variation of the base64 algorithm but does not chunk the output. The
* url-safe variation emits - and _ instead of + and / characters.
*
* @param binaryData
* binary data to encode
* @return byte[] containing Base64 characters in their UTF-8 representation.
* @since 1.4
*/
public static byte[] encodeBase64URLSafe(byte[] binaryData) {
return encodeBase64(binaryData, false, true);
}
/**
* Encodes binary data using a URL-safe variation of the base64 algorithm but does not chunk the output. The
* url-safe variation emits - and _ instead of + and / characters.
*
* @param binaryData
* binary data to encode
* @return String containing Base64 characters
* @since 1.4
*/
public static String encodeBase64URLSafeString(byte[] binaryData) {
return StringUtils.newStringUtf8(encodeBase64(binaryData, false, true));
}
/**
* Encodes binary data using the base64 algorithm and chunks the encoded output into 76 character blocks
*
* @param binaryData
* binary data to encode
* @return Base64 characters chunked in 76 character blocks
*/
public static byte[] encodeBase64Chunked(byte[] binaryData) {
return encodeBase64(binaryData, true);
}
/**
* Encodes binary data using the base64 algorithm, optionally chunking the output into 76 character blocks.
*
* @param binaryData
* Array containing binary data to encode.
* @param isChunked
* if <code>true</code> this encoder will chunk the base64 output into 76 character blocks
* @return Base64-encoded data.
* @throws IllegalArgumentException
* Thrown when the input array needs an output array bigger than {@link Integer#MAX_VALUE}
*/
public static byte[] encodeBase64(byte[] binaryData, boolean isChunked) {
return encodeBase64(binaryData, isChunked, false);
}
/**
* Encodes binary data using the base64 algorithm, optionally chunking the output into 76 character blocks.
*
* @param binaryData
* Array containing binary data to encode.
* @param isChunked
* if <code>true</code> this encoder will chunk the base64 output into 76 character blocks
* @param urlSafe
* if <code>true</code> this encoder will emit - and _ instead of the usual + and / characters.
* @return Base64-encoded data.
* @throws IllegalArgumentException
* Thrown when the input array needs an output array bigger than {@link Integer#MAX_VALUE}
* @since 1.4
*/
public static byte[] encodeBase64(byte[] binaryData, boolean isChunked, boolean urlSafe) {
return encodeBase64(binaryData, isChunked, urlSafe, Integer.MAX_VALUE);
}
/**
* Encodes binary data using the base64 algorithm, optionally chunking the output into 76 character blocks.
*
* @param binaryData
* Array containing binary data to encode.
* @param isChunked
* if <code>true</code> this encoder will chunk the base64 output into 76 character blocks
* @param urlSafe
* if <code>true</code> this encoder will emit - and _ instead of the usual + and / characters.
* @param maxResultSize
* The maximum result size to accept.
* @return Base64-encoded data.
* @throws IllegalArgumentException
* Thrown when the input array needs an output array bigger than maxResultSize
* @since 1.4
*/
public static byte[] encodeBase64(byte[] binaryData, boolean isChunked, boolean urlSafe, int maxResultSize) {
if (binaryData == null || binaryData.length == 0) {
return binaryData;
}
// Create this so can use the super-class method
// Also ensures that the same roundings are performed by the ctor and the code
Base64 b64 = isChunked ? new Base64(urlSafe) : new Base64(0, CHUNK_SEPARATOR, urlSafe);
long len = b64.getEncodedLength(binaryData);
if (len > maxResultSize) {
throw new IllegalArgumentException("Input array too big, the output array would be bigger (" +
len +
") than the specified maximum size of " +
maxResultSize);
}
return b64.encode(binaryData);
}
/**
* Decodes a Base64 String into octets
*
* @param base64String
* String containing Base64 data
* @return Array containing decoded data.
* @since 1.4
*/
public static byte[] decodeBase64(String base64String) {
return new Base64().decode(base64String);
}
/**
* Decodes Base64 data into octets
*
* @param base64Data
* Byte array containing Base64 data
* @return Array containing decoded data.
*/
public static byte[] decodeBase64(byte[] base64Data) {
return new Base64().decode(base64Data);
}
// Implementation of the Encoder Interface
// Implementation of integer encoding used for crypto
/**
* Decodes a Base64-encoded integer according to crypto standards such as W3C's XML-Signature
*
* @param pArray
* a byte array containing base64 character data
* @return A BigInteger
* @since 1.4
*/
public static BigInteger decodeInteger(byte[] pArray) {
return new BigInteger(1, decodeBase64(pArray));
}
/**
* Encodes a BigInteger to a Base64-encoded byte array according to crypto standards such as W3C's XML-Signature
*
* @param bigInt
* a BigInteger
* @return A byte array containing base64 character data
* @throws NullPointerException
* if null is passed in
* @since 1.4
*/
public static byte[] encodeInteger(BigInteger bigInt) {
if (bigInt == null) {
throw new NullPointerException("encodeInteger called with null parameter");
}
return encodeBase64(toIntegerBytes(bigInt), false);
}
/**
* Returns a byte-array representation of a <code>BigInteger</code> without sign bit.
*
* @param bigInt
* <code>BigInteger</code> to be converted
* @return a byte array representation of the BigInteger parameter
*/
static byte[] toIntegerBytes(BigInteger bigInt) {
int bitlen = bigInt.bitLength();
// round bitlen
bitlen = ((bitlen + 7) >> 3) << 3;
byte[] bigBytes = bigInt.toByteArray();
if (((bigInt.bitLength() % 8) != 0) && (((bigInt.bitLength() / 8) + 1) == (bitlen / 8))) {
return bigBytes;
}
// set up params for copying everything but sign bit
int startSrc = 0;
int len = bigBytes.length;
// if bigInt is exactly byte-aligned, just skip signbit in copy
if ((bigInt.bitLength() % 8) == 0) {
startSrc = 1;
len--;
}
int startDst = bitlen / 8 - len; // to pad w/ nulls as per spec
byte[] resizedBytes = new byte[bitlen / 8];
System.arraycopy(bigBytes, startSrc, resizedBytes, startDst, len);
return resizedBytes;
}
/**
* Returns whether or not the <code>octet</code> is in the Base64 alphabet.
*
* @param octet
* The value to test
* @return <code>true</code> if the value is defined in the Base64 alphabet, <code>false</code> otherwise.
*/
@Override
protected boolean isInAlphabet(byte octet) {
return octet >= 0 && octet < decodeTable.length && decodeTable[octet] != -1;
}
}
| 3,908 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/codec/binary/BaseNCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.base.codec.binary;
import com.paypal.base.codec.BinaryDecoder;
import com.paypal.base.codec.BinaryEncoder;
import com.paypal.base.codec.DecoderException;
import com.paypal.base.codec.EncoderException;
/**
* Abstract superclass for Base-N encoders and decoders.
*
* <p>
* This class is not thread-safe.
* Each thread should use its own instance.
* </p>
*/
public abstract class BaseNCodec implements BinaryEncoder, BinaryDecoder {
/**
* MIME chunk size per RFC 2045 section 6.8.
*
* <p>
* The {@value} character limit does not count the trailing CRLF, but counts all other characters, including any
* equal signs.
* </p>
*
* @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045 section 6.8</a>
*/
public static final int MIME_CHUNK_SIZE = 76;
/**
* PEM chunk size per RFC 1421 section 4.3.2.4.
*
* <p>
* The {@value} character limit does not count the trailing CRLF, but counts all other characters, including any
* equal signs.
* </p>
*
* @see <a href="http://tools.ietf.org/html/rfc1421">RFC 1421 section 4.3.2.4</a>
*/
public static final int PEM_CHUNK_SIZE = 64;
private static final int DEFAULT_BUFFER_RESIZE_FACTOR = 2;
/**
* Defines the default buffer size - currently {@value}
* - must be large enough for at least one encoded block+separator
*/
private static final int DEFAULT_BUFFER_SIZE = 8192;
/** Mask used to extract 8 bits, used in decoding bytes */
protected static final int MASK_8BITS = 0xff;
/**
* Byte used to pad output.
*/
protected static final byte PAD_DEFAULT = '='; // Allow static access to default
protected final byte PAD = PAD_DEFAULT; // instance variable just in case it needs to vary later
/** Number of bytes in each full block of unencoded data, e.g. 3 for Base64 and 5 for Base32 */
private final int unencodedBlockSize;
/** Number of bytes in each full block of encoded data, e.g. 4 for Base64 and 8 for Base32 */
private final int encodedBlockSize;
/**
* Chunksize for encoding. Not used when decoding.
* A value of zero or less implies no chunking of the encoded data.
* Rounded down to nearest multiple of encodedBlockSize.
*/
protected final int lineLength;
/**
* Size of chunk separator. Not used unless {@link #lineLength} > 0.
*/
private final int chunkSeparatorLength;
/**
* Buffer for streaming.
*/
protected byte[] buffer;
/**
* Position where next character should be written in the buffer.
*/
protected int pos;
/**
* Position where next character should be read from the buffer.
*/
private int readPos;
/**
* Boolean flag to indicate the EOF has been reached. Once EOF has been reached, this object becomes useless,
* and must be thrown away.
*/
protected boolean eof;
/**
* Variable tracks how many characters have been written to the current line. Only used when encoding. We use it to
* make sure each encoded line never goes beyond lineLength (if lineLength > 0).
*/
protected int currentLinePos;
/**
* Writes to the buffer only occur after every 3/5 reads when encoding, and every 4/8 reads when decoding.
* This variable helps track that.
*/
protected int modulus;
/**
* Note <code>lineLength</code> is rounded down to the nearest multiple of {@link #encodedBlockSize}
* If <code>chunkSeparatorLength</code> is zero, then chunking is disabled.
* @param unencodedBlockSize the size of an unencoded block (e.g. Base64 = 3)
* @param encodedBlockSize the size of an encoded block (e.g. Base64 = 4)
* @param lineLength if > 0, use chunking with a length <code>lineLength</code>
* @param chunkSeparatorLength the chunk separator length, if relevant
*/
protected BaseNCodec(int unencodedBlockSize, int encodedBlockSize, int lineLength, int chunkSeparatorLength){
this.unencodedBlockSize = unencodedBlockSize;
this.encodedBlockSize = encodedBlockSize;
this.lineLength = (lineLength > 0 && chunkSeparatorLength > 0) ? (lineLength / encodedBlockSize) * encodedBlockSize : 0;
this.chunkSeparatorLength = chunkSeparatorLength;
}
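/*
 * Illustrative note (added, not in the original source): with encodedBlockSize 4, a
 * requested lineLength of 76 stays 76 after the rounding above, while a requested 70
 * would be rounded down to 68; a non-positive lineLength or a zero chunkSeparatorLength
 * disables chunking entirely.
 */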
/**
* Returns true if this object has buffered data for reading.
*
* @return true if there is data still available for reading.
*/
boolean hasData() { // package protected for access from I/O streams
return this.buffer != null;
}
/**
* Returns the amount of buffered data available for reading.
*
* @return The amount of buffered data available for reading.
*/
int available() { // package protected for access from I/O streams
return buffer != null ? pos - readPos : 0;
}
/**
* Get the default buffer size. Can be overridden.
*
* @return {@link #DEFAULT_BUFFER_SIZE}
*/
protected int getDefaultBufferSize() {
return DEFAULT_BUFFER_SIZE;
}
/** Increases our buffer by the {@link #DEFAULT_BUFFER_RESIZE_FACTOR}. */
private void resizeBuffer() {
if (buffer == null) {
buffer = new byte[getDefaultBufferSize()];
pos = 0;
readPos = 0;
} else {
byte[] b = new byte[buffer.length * DEFAULT_BUFFER_RESIZE_FACTOR];
System.arraycopy(buffer, 0, b, 0, buffer.length);
buffer = b;
}
}
/**
* Ensure that the buffer has room for <code>size</code> bytes
*
* @param size minimum spare space required
*/
protected void ensureBufferSize(int size){
if ((buffer == null) || (buffer.length < pos + size)){
resizeBuffer();
}
}
/**
* Extracts buffered data into the provided byte[] array, starting at position bPos,
* up to a maximum of bAvail bytes. Returns how many bytes were actually extracted.
*
* @param b
* byte[] array to extract the buffered data into.
* @param bPos
* position in byte[] array to start extraction at.
* @param bAvail
* amount of bytes we're allowed to extract. We may extract fewer (if fewer are available).
* @return The number of bytes successfully extracted into the provided byte[] array.
*/
int readResults(byte[] b, int bPos, int bAvail) { // package protected for access from I/O streams
if (buffer != null) {
int len = Math.min(available(), bAvail);
System.arraycopy(buffer, readPos, b, bPos, len);
readPos += len;
if (readPos >= pos) {
buffer = null; // so hasData() will return false, and this method can return -1
}
return len;
}
return eof ? -1 : 0;
}
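/*
 * Sketch of the consumer loop the codec's I/O stream classes are assumed to use (added
 * for illustration only; hasData(), available() and readResults() are package-private,
 * so this shape only applies to callers in the same package):
 *
 *   byte[] chunk = new byte[256];
 *   while (codec.hasData()) {
 *       int n = codec.readResults(chunk, 0, chunk.length);
 *       // pass the first n bytes of chunk downstream
 *   }
 */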
/**
* Checks if a byte value is whitespace or not.
* Whitespace is taken to mean: space, tab, CR, LF
* @param byteToCheck
* the byte to check
* @return true if byte is whitespace, false otherwise
*/
protected static boolean isWhiteSpace(byte byteToCheck) {
switch (byteToCheck) {
case ' ' :
case '\n' :
case '\r' :
case '\t' :
return true;
default :
return false;
}
}
/**
* Resets this object to its initial newly constructed state.
*/
private void reset() {
buffer = null;
pos = 0;
readPos = 0;
currentLinePos = 0;
modulus = 0;
eof = false;
}
/**
* Encodes an Object using the Base-N algorithm. This method is provided in order to satisfy the requirements of the
* Encoder interface, and will throw an EncoderException if the supplied object is not of type byte[].
*
* @param pObject
* Object to encode
* @return An object (of type byte[]) containing the Base-N encoded data which corresponds to the byte[] supplied.
* @throws EncoderException
* if the parameter supplied is not of type byte[]
*/
public Object encode(Object pObject) throws EncoderException {
if (!(pObject instanceof byte[])) {
throw new EncoderException("Parameter supplied to Base-N encode is not a byte[]");
}
return encode((byte[]) pObject);
}
/**
* Encodes a byte[] containing binary data, into a String containing characters in the Base-N alphabet.
*
* @param pArray
* a byte array containing binary data
* @return A String containing only Base-N character data
*/
public String encodeToString(byte[] pArray) {
return StringUtils.newStringUtf8(encode(pArray));
}
/**
* Decodes an Object using the Base-N algorithm. This method is provided in order to satisfy the requirements of the
* Decoder interface, and will throw a DecoderException if the supplied object is not of type byte[] or String.
*
* @param pObject
* Object to decode
* @return An object (of type byte[]) containing the binary data which corresponds to the byte[] or String supplied.
* @throws DecoderException
* if the parameter supplied is not of type byte[]
*/
public Object decode(Object pObject) throws DecoderException {
if (pObject instanceof byte[]) {
return decode((byte[]) pObject);
} else if (pObject instanceof String) {
return decode((String) pObject);
} else {
throw new DecoderException("Parameter supplied to Base-N decode is not a byte[] or a String");
}
}
/**
* Decodes a String containing characters in the Base-N alphabet.
*
* @param pArray
* A String containing Base-N character data
* @return a byte array containing binary data
*/
public byte[] decode(String pArray) {
return decode(StringUtils.getBytesUtf8(pArray));
}
/**
* Decodes a byte[] containing characters in the Base-N alphabet.
*
* @param pArray
* A byte array containing Base-N character data
* @return a byte array containing binary data
*/
public byte[] decode(byte[] pArray) {
reset();
if (pArray == null || pArray.length == 0) {
return pArray;
}
decode(pArray, 0, pArray.length);
decode(pArray, 0, -1); // Notify decoder of EOF.
byte[] result = new byte[pos];
readResults(result, 0, result.length);
return result;
}
/**
* Encodes a byte[] containing binary data, into a byte[] containing characters in the alphabet.
*
* @param pArray
* a byte array containing binary data
* @return A byte array containing only the Base-N alphabetic character data
*/
public byte[] encode(byte[] pArray) {
reset();
if (pArray == null || pArray.length == 0) {
return pArray;
}
encode(pArray, 0, pArray.length);
encode(pArray, 0, -1); // Notify encoder of EOF.
byte[] buf = new byte[pos - readPos];
readResults(buf, 0, buf.length);
return buf;
}
/**
* Encodes a byte[] containing binary data, into a String containing characters in the appropriate alphabet.
* Uses UTF8 encoding.
*
* @param pArray a byte array containing binary data
* @return String containing only character data in the appropriate alphabet.
*/
public String encodeAsString(byte[] pArray){
return StringUtils.newStringUtf8(encode(pArray));
}
abstract void encode(byte[] pArray, int i, int length); // package protected for access from I/O streams
abstract void decode(byte[] pArray, int i, int length); // package protected for access from I/O streams
/**
* Returns whether or not the <code>octet</code> is in the current alphabet.
* Does not allow whitespace or pad.
*
* @param value The value to test
*
* @return <code>true</code> if the value is defined in the current alphabet, <code>false</code> otherwise.
*/
protected abstract boolean isInAlphabet(byte value);
/**
* Tests a given byte array to see if it contains only valid characters within the alphabet.
* The method optionally treats whitespace and pad as valid.
*
* @param arrayOctet byte array to test
* @param allowWSPad if <code>true</code>, then whitespace and PAD are also allowed
*
* @return <code>true</code> if all bytes are valid characters in the alphabet or if the byte array is empty;
* <code>false</code>, otherwise
*/
public boolean isInAlphabet(byte[] arrayOctet, boolean allowWSPad) {
for (int i = 0; i < arrayOctet.length; i++) {
if (!isInAlphabet(arrayOctet[i]) &&
(!allowWSPad || (arrayOctet[i] != PAD) && !isWhiteSpace(arrayOctet[i]))) {
return false;
}
}
return true;
}
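/*
 * Illustrative check (added; the values are arbitrary examples). Using the Base64
 * subclass, a padded Base64 string passes when whitespace/pad are allowed, while a
 * character outside the alphabet fails:
 *
 *   BaseNCodec codec = new Base64();
 *   codec.isInAlphabet(StringUtils.getBytesUtf8("SGVsbG8="), true);  // true
 *   codec.isInAlphabet(StringUtils.getBytesUtf8("SGVs*bG8="), true); // false, '*' is not in the alphabet
 */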
/**
* Tests a given String to see if it contains only valid characters within the alphabet.
* The method treats whitespace and PAD as valid.
*
* @param basen String to test
* @return <code>true</code> if all characters in the String are valid characters in the alphabet or if
* the String is empty; <code>false</code>, otherwise
* @see #isInAlphabet(byte[], boolean)
*/
public boolean isInAlphabet(String basen) {
return isInAlphabet(StringUtils.getBytesUtf8(basen), true);
}
/**
* Tests a given byte array to see if it contains any characters within the alphabet or PAD.
*
* Intended for use in checking line-ending arrays
*
* @param arrayOctet
* byte array to test
* @return <code>true</code> if any byte is a valid character in the alphabet or PAD; <code>false</code> otherwise
*/
protected boolean containsAlphabetOrPad(byte[] arrayOctet) {
if (arrayOctet == null) {
return false;
}
for (byte element : arrayOctet) {
if (PAD == element || isInAlphabet(element)) {
return true;
}
}
return false;
}
/**
* Calculates the amount of space needed to encode the supplied array.
*
* @param pArray byte[] array which will later be encoded
*
* @return amount of space needed to encode the supplied array.
* Returns a long since a max-len array will require > Integer.MAX_VALUE
*/
public long getEncodedLength(byte[] pArray) {
// Calculate non-chunked size - rounded up to allow for padding
// cast to long is needed to avoid possibility of overflow
long len = ((pArray.length + unencodedBlockSize-1) / unencodedBlockSize) * (long) encodedBlockSize;
if (lineLength > 0) { // We're using chunking
// Round up to nearest multiple
len += ((len + lineLength-1) / lineLength) * chunkSeparatorLength;
}
return len;
}
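/*
 * Worked example (added for illustration): for Base64, unencodedBlockSize is 3 and
 * encodedBlockSize is 4, so a 10-byte input needs ((10 + 2) / 3) * 4 = 16 characters
 * without chunking; with 76-character chunking and a 2-byte CRLF separator (the Base64
 * default separator is assumed here), the formula above adds 2 more, giving 18.
 */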
}
| 3,909 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/sdk
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/sdk/info/SDKVersionImpl.java
|
package com.paypal.base.sdk.info;
import com.paypal.base.SDKVersion;
/**
* Implementation of SDKVersion
*/
public class SDKVersionImpl implements SDKVersion {
/**
* SDK ID used in User-Agent HTTP header
*/
private static final String SDK_ID = "PayPal-Java-SDK";
/**
* SDK Version used in User-Agent HTTP header
*/
private static final String SDK_VERSION = "1.14.1";
public String getSDKId() {
return SDK_ID;
}
public String getSDKVersion() {
return SDK_VERSION;
}
}
| 3,910 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/OAuthException.java
|
/*
* Copyright 2005 PayPal, Inc. All Rights Reserved.
*/
package com.paypal.base.exception;
public class OAuthException extends PayPalException{
/**
* Constructs a new exception with the specified detail message.
*/
public OAuthException(String message)
{
super(message);
this.message = message;
}
public OAuthException(String message, Throwable throwable) {
super(message, throwable);
this.message = message;
}
public String getError() {
return message;
}
private String message;
}
| 3,911 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/SSLConfigurationException.java
|
package com.paypal.base.exception;
/**
* SSLConfigurationException is thrown for errors caused during an SSL connection
*
*/
public class SSLConfigurationException extends BaseException {
/**
* Serial version UID
*/
private static final long serialVersionUID = -2345834567387658303L;
public SSLConfigurationException(String message) {
super(message);
}
public SSLConfigurationException(String message, Throwable exception) {
super(message, exception);
}
}
| 3,912 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/PayPalException.java
|
/*
* Copyright 2005 PayPal, Inc. All Rights Reserved.
*/
package com.paypal.base.exception;
/**
* A PayPalException is thrown to signal a problem during SDK execution.
*/
public abstract class PayPalException extends Exception
{
/*
* Default constructor
*/
public PayPalException()
{
super();
}
/*
* Constructs a new exception with the specified detail message.
*/
public PayPalException(String message)
{
super(message);
}
/*
* Constructs a new exception with the specified detail message and cause.
*/
public PayPalException(String message, Throwable cause)
{
super(message, cause);
}
} // PayPalException
| 3,913 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/InvalidResponseDataException.java
|
package com.paypal.base.exception;
/**
* InvalidResponseDataException is used to denote errors in response data
*
*/
public class InvalidResponseDataException extends BaseException {
/**
* Serial version UID
*/
private static final long serialVersionUID = -7489562847530985773L;
public InvalidResponseDataException(String msg) {
super(msg);
}
public InvalidResponseDataException(String msg, Throwable exception) {
super(msg, exception);
}
}
| 3,914 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/MissingCredentialException.java
|
package com.paypal.base.exception;
/**
* MissingCredentialException is thrown when the credentials used are wrongly
* configured or not found in the application properties
*
*/
public class MissingCredentialException extends BaseException {
/**
* Serial version UID
*/
private static final long serialVersionUID = -2345825926387658303L;
public MissingCredentialException(String message) {
super(message);
}
public MissingCredentialException(String message, Throwable exception) {
super(message, exception);
}
}
| 3,915 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/ClientActionRequiredException.java
|
package com.paypal.base.exception;
/**
* ClientActionRequiredException encapsulates cases where the client has to
* take action based on errors in the API call.
*
*/
public class ClientActionRequiredException extends HttpErrorException {
/**
* Serial version UID
*/
private static final long serialVersionUID = -15345584654755445L;
public ClientActionRequiredException(String message) {
super(message);
}
public ClientActionRequiredException(String message, Throwable exception) {
super(message, exception);
}
public ClientActionRequiredException(int responsecode, String errorResponse, String msg, Throwable exception) {
super(responsecode, errorResponse, msg, exception);
}
public String toString() {
return "HTTP response code: " + this.getResponsecode() + "\n"
+ "error message: " + this.getErrorResponse();
}
}
| 3,916 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/InvalidCredentialException.java
|
package com.paypal.base.exception;
/**
* InvalidCredentialException is used to denote errors in the credentials used in an API
* call
*
*/
public class InvalidCredentialException extends BaseException {
/**
* Serial version UID
*/
private static final long serialVersionUID = -4321565982347658546L;
public InvalidCredentialException(String msg) {
super(msg);
}
public InvalidCredentialException(String msg, Throwable exception) {
super(msg, exception);
}
}
| 3,917 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/BaseException.java
|
package com.paypal.base.exception;
/**
* BaseException for SDK
*/
public class BaseException extends Exception {
/**
* Serial version UID
*/
private static final long serialVersionUID = -5345825923487658213L;
public BaseException(String msg) {
super(msg);
}
public BaseException(String msg, Throwable exception) {
super(msg, exception);
}
}
| 3,918 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/exception/HttpErrorException.java
|
package com.paypal.base.exception;
/**
* HttpErrorException denotes errors that occur in an HTTP call
*
*/
public class HttpErrorException extends BaseException {
/**
* Serial version UID
*/
private static final long serialVersionUID = -4312358746964758546L;
private int responsecode;
private String errorResponse;
public HttpErrorException(String msg) {
super(msg);
}
public HttpErrorException(String msg, Throwable exception) {
super(msg, exception);
}
public HttpErrorException(int responsecode, String errorResponse, String msg, Throwable exception) {
super(msg, exception);
this.responsecode = responsecode;
this.errorResponse = errorResponse;
}
public int getResponsecode() {
return responsecode;
}
public String getErrorResponse() {
return errorResponse;
}
}
| 3,919 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/PayPalResource.java
|
package com.paypal.base.rest;
import com.paypal.base.*;
import com.paypal.base.exception.BaseException;
import com.paypal.base.exception.ClientActionRequiredException;
import com.paypal.base.exception.HttpErrorException;
import com.paypal.base.sdk.info.SDKVersionImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
* PayPalResource acts as a base class for REST enabled resources.
*/
public abstract class PayPalResource extends PayPalModel{
private static final Logger log = LoggerFactory.getLogger(PayPalResource.class);
/*
* The class relies on an implementation of APICallPreHandler (here
* RESTAPICallPreHandler) to get access to endpoint, HTTP headers, and
* payload.
*/
/**
* Map used in dynamic configuration
*/
private static Map<String, String> configurationMap;
/**
* Configuration enabled flag
*/
private static boolean configInitialized = false;
/**
* Last request sent to Service
*/
private static final ThreadLocal<String> LASTREQUEST = new ThreadLocal<String>();
/**
* Last response returned from the Service
*/
private static final ThreadLocal<String> LASTRESPONSE = new ThreadLocal<String>();
/**
* Initialize the system using a File (properties file). The system is
* initialized using the given file and if the initialization succeeds the
* default 'sdk_config.properties' can only be loaded by calling the method
* initializeToDefault()
*
* @param file
* File object of a properties entity
* @throws PayPalRESTException
* @return OAuthTokenCredential instance with client ID and client secret stored in configuration file.
*/
public static OAuthTokenCredential initConfig(File file) throws PayPalRESTException {
try {
if (!file.exists()) {
throw new FileNotFoundException("File doesn't exist: "
+ file.getAbsolutePath());
}
FileInputStream fis = new FileInputStream(file);
return initConfig(fis);
} catch (IOException ioe) {
log.error(ioe.getMessage(), ioe);
throw new PayPalRESTException(ioe.getMessage(), ioe);
}
}
/**
* Initialize using Properties. The system is initialized using the given
* properties object and if the initialization succeeds the default
* 'sdk_config.properties' can only be loaded by calling the method
* initializeToDefault()
*
* @param properties
* Properties object
* @return OAuthTokenCredential instance with client ID and client secret in given properties.
*/
public static OAuthTokenCredential initConfig(Properties properties) {
configurationMap = SDKUtil.constructMap(properties);
configInitialized = true;
return getOAuthTokenCredential();
}
/**
* Initialize using an {@link InputStream} (of a properties file). The system
* is initialized using the given {@link InputStream} and if the
* initialization succeeds the default 'sdk_config.properties' can only be
* loaded by calling the method initializeToDefault(). The system is
* initialized with the information after loading defaults for the
* parameters that are not passed as part of the configuration. For defaults
* see {@link ConfigManager}
*
* @param inputStream
* InputStream
* @throws PayPalRESTException
* @return OAuthTokenCredential instance with client ID and client secret stored in given inputStream.
*/
public static OAuthTokenCredential initConfig(InputStream inputStream)
throws PayPalRESTException {
try {
Properties properties = new Properties();
properties.load(inputStream);
/*
* Create a Map instance and combine it with default values
*/
configurationMap = SDKUtil.constructMap(properties);
configInitialized = true;
return getOAuthTokenCredential();
} catch (IOException ioe) {
log.error(ioe.getMessage(), ioe);
throw new PayPalRESTException(ioe.getMessage(), ioe);
}
}
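/*
 * Illustrative bootstrap sketch (added; MyApp is a placeholder class, and the properties
 * file is assumed to define the client id/secret under the keys the SDK expects, see
 * Constants.CLIENT_ID / Constants.CLIENT_SECRET):
 *
 *   InputStream in = MyApp.class.getResourceAsStream("/sdk_config.properties");
 *   OAuthTokenCredential credential = PayPalResource.initConfig(in);
 *   String accessToken = credential.getAccessToken();   // may throw PayPalRESTException
 */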
/**
* Return Client ID from configuration Map
*/
public static String getClientID() {
return configurationMap.get(Constants.CLIENT_ID);
}
/**
* Returns Client Secret from configuration Map
*/
public static String getClientSecret() {
return configurationMap.get(Constants.CLIENT_SECRET);
}
/**
* Returns OAuthTokenCredential instance using client ID and client secret loaded from configuration.
* @return OAuthTokenCredential instance.
*/
public static OAuthTokenCredential getOAuthTokenCredential() {
if(configInitialized){
return new OAuthTokenCredential(getClientID(), getClientSecret(), configurationMap);
}else{
return new OAuthTokenCredential(getClientID(), getClientSecret());
}
}
/**
* Initialize to default properties
*
* @throws PayPalRESTException
*/
public static void initializeToDefault() throws PayPalRESTException {
configurationMap = SDKUtil.combineDefaultMap(ConfigManager
.getInstance().getConfigurationMap());
}
/**
* Returns the last request sent to the Service
*
* @return Last request sent to the server
*/
public static String getLastRequest() {
return LASTREQUEST.get();
}
/**
* Returns the last response returned by the Service
*
* @return Last response got from the Service
*/
public static String getLastResponse() {
return LASTRESPONSE.get();
}
public static Map<String, String> getConfigurations() {
return configurationMap;
}
/**
* Configures and executes REST call: Supports JSON
*
* @deprecated Please use {@link #configureAndExecute(APIContext, HttpMethod, String, String, Class)} instead.
* Passing APIContext gives us better information than just raw access token.
*
* @param <T>
* Response Type for de-serialization
* @param accessToken
* OAuth AccessToken to be used for the call.
* @param httpMethod
* Http Method verb
* @param resourcePath
* Resource URI path
* @param payLoad
* Payload to Service
* @param clazz
* {@link Class} object used in De-serialization
* @return T
* @throws PayPalRESTException
*/
public static <T> T configureAndExecute(String accessToken,
HttpMethod httpMethod, String resourcePath, String payLoad,
Class<T> clazz) throws PayPalRESTException {
return configureAndExecute(new APIContext(accessToken), httpMethod, resourcePath, payLoad, clazz);
}
/**
* Configures and executes REST call
*
* @param <T>
* Response Type for de-serialization
* @param apiContext
* {@link APIContext} to be used for the call.
* @param httpMethod
* Http Method verb
* @param resourcePath
* Resource URI path
* @param payLoad
* Payload to Service
* @param clazz
* {@link Class} object used in De-serialization
* @return T
* @throws PayPalRESTException
*/
public static <T> T configureAndExecute(APIContext apiContext,
HttpMethod httpMethod, String resourcePath, String payLoad,
Class<T> clazz) throws PayPalRESTException {
return configureAndExecute(apiContext, httpMethod, resourcePath, payLoad, clazz, null);
}
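/*
 * Illustrative call sketch (added): the resource path and SomeModel response class below
 * are placeholders, not real SDK resources; concrete model classes extending
 * PayPalResource typically wrap this call rather than exposing it to applications.
 *
 *   APIContext context = new APIContext(clientId, clientSecret, "sandbox");
 *   SomeModel response = configureAndExecute(context, HttpMethod.GET,
 *           "v1/some/resource/ID-123", null, SomeModel.class);
 */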
/**
* Configures and executes REST call: Supports JSON
*
* @param <T>
* Response Type for de-serialization
* @param apiContext
* {@link APIContext} to be used for the call.
* @param httpMethod
* Http Method verb
* @param resourcePath
* Resource URI path
* @param payLoad
* Payload to Service
* @param clazz
* {@link Class} object used in De-serialization
* @param accessToken
* Access Token to be used instead of apiContext
* @return T
* @throws PayPalRESTException
*/
public static <T> T configureAndExecute(APIContext apiContext,
HttpMethod httpMethod, String resourcePath, String payLoad,
Class<T> clazz, String accessToken) throws PayPalRESTException {
T t = null;
Map<String, String> cMap;
String requestId;
Map<String, String> headersMap;
if (apiContext != null) {
if (apiContext.getHTTPHeader(Constants.HTTP_CONTENT_TYPE_HEADER) == null) {
apiContext.addHTTPHeader(Constants.HTTP_CONTENT_TYPE_HEADER, Constants.HTTP_CONTENT_TYPE_JSON);
}
if (apiContext.getSdkVersion() == null) {
// Default the SDK version when the caller has not supplied one
apiContext.setSdkVersion(new SDKVersionImpl());
}
if (apiContext.getConfigurationMap() != null) {
cMap = SDKUtil.combineDefaultMap(apiContext
.getConfigurationMap());
} else {
if (!configInitialized) {
initializeToDefault();
}
/*
* The Map returned here is already combined with default values
*/
cMap = new HashMap<String, String>(
configurationMap);
}
headersMap = apiContext.getHTTPHeaders();
if (accessToken == null) {
accessToken = apiContext.fetchAccessToken();
}
// If it is still null, throw the exception.
if (accessToken == null) {
throw new IllegalArgumentException("AccessToken cannot be null or empty");
}
requestId = apiContext.getRequestId();
APICallPreHandler apiCallPreHandler = createAPICallPreHandler(cMap,
payLoad, resourcePath, headersMap, accessToken, requestId,
apiContext.getSdkVersion());
HttpConfiguration httpConfiguration = createHttpConfiguration(cMap,
httpMethod, apiCallPreHandler);
t = execute(apiCallPreHandler, httpConfiguration, clazz);
}
return t;
}
/**
* Configures and executes REST call: Supports JSON
*
* @deprecated Please use {@link #configureAndExecute(APIContext, HttpMethod, String, String, Class)} instead. Headers could be passed directly
* to #APIContext itself.
*
* @param <T>
* @param apiContext
* {@link APIContext} to be used for the call.
* @param httpMethod
* Http Method verb
* @param resourcePath
* Resource URI path
* @param headersMap
* Optional headers Map
* @param payLoad
* Payload to Service
* @param clazz
* {@link Class} object used in De-serialization
* @return T
* @throws PayPalRESTException
*/
public static <T> T configureAndExecute(APIContext apiContext,
HttpMethod httpMethod, String resourcePath,
Map<String, String> headersMap, String payLoad, Class<T> clazz)
throws PayPalRESTException {
if (apiContext != null) {
apiContext.addHTTPHeaders(headersMap);
}
return configureAndExecute(apiContext, httpMethod, resourcePath, payLoad, clazz);
}
/**
* Returns an implementation of {@link APICallPreHandler} for the underlying
* layer.
*
* @param configurationMap
* configuration Map
* @param payLoad
* Raw payload
* @param resourcePath
* URI part of the resource operated on
* @param headersMap
* Custom HTTP headers map
* @param accessToken
* OAuth Token
* @param requestId
* PayPal Request Id
* @param sdkVersion
* {@link SDKVersion} instance
* @return APICallPreHandler
*/
public static APICallPreHandler createAPICallPreHandler(
Map<String, String> configurationMap, String payLoad,
String resourcePath, Map<String, String> headersMap,
String accessToken, String requestId, SDKVersion sdkVersion) {
APICallPreHandler apiCallPreHandler = null;
RESTAPICallPreHandler restAPICallPreHandler = new RESTAPICallPreHandler(
configurationMap, headersMap);
restAPICallPreHandler.setResourcePath(resourcePath);
restAPICallPreHandler.setRequestId(requestId);
restAPICallPreHandler.setAuthorizationToken(accessToken);
restAPICallPreHandler.setPayLoad(payLoad);
restAPICallPreHandler.setSdkVersion(sdkVersion);
apiCallPreHandler = restAPICallPreHandler;
return apiCallPreHandler;
}
/**
* Execute the API call and return response
*
* @param <T>
* Generic Type for response object construction
* @param apiCallPreHandler
* Implementation of {@link APICallPreHandler}
* @param httpConfiguration
* {@link HttpConfiguration}
* @param clazz
* Response Object class
* @return Response Type
* @throws PayPalRESTException
*/
private static <T> T execute(APICallPreHandler apiCallPreHandler,
HttpConfiguration httpConfiguration, Class<T> clazz)
throws PayPalRESTException {
T t = null;
ConnectionManager connectionManager;
HttpConnection httpConnection;
Map<String, String> headers;
String responseString;
try {
// REST Headers
headers = apiCallPreHandler.getHeaderMap();
// HttpConnection Initialization
connectionManager = ConnectionManager.getInstance();
httpConnection = connectionManager.getConnection(httpConfiguration);
httpConnection.createAndconfigureHttpConnection(httpConfiguration);
// capture request and log if conditions are met
LASTREQUEST.set(apiCallPreHandler.getPayLoad());
String mode = "";
if (configurationMap != null) {
mode = configurationMap.get(Constants.MODE);
} else if (apiCallPreHandler.getConfigurationMap() != null) {
mode = apiCallPreHandler.getConfigurationMap().get(Constants.MODE);
}
if (Constants.LIVE.equalsIgnoreCase(mode) && log.isDebugEnabled()) {
log.warn("Log level cannot be set to DEBUG in " + Constants.LIVE + " mode. Skipping request/response logging...");
}
if (!Constants.LIVE.equalsIgnoreCase(mode)) {
log.debug("request header: " + headers.toString());
log.debug("request body: " + LASTREQUEST.get());
}
// send request and receive response
responseString = httpConnection.execute(null,
apiCallPreHandler.getPayLoad(), headers);
// capture response and log if conditions are met
LASTRESPONSE.set(responseString);
if (!Constants.LIVE.equalsIgnoreCase(mode)) {
log.debug("response: " + LASTRESPONSE.get());
}
if (clazz != null) {
t = JSONFormatter.fromJSON(responseString, clazz);
}
} catch (ClientActionRequiredException e) {
throw PayPalRESTException.createFromHttpErrorException(e);
} catch (HttpErrorException e) {
throw PayPalRESTException.createFromHttpErrorException(e);
} catch (Exception e) {
throw new PayPalRESTException(e.getMessage(), e);
}
return t;
}
/**
* Utility method that creates a {@link HttpConfiguration} object from the
* passed information
*
* @param configurationMap
* Configuration to base the construction upon.
* @param httpMethod
* HTTP Method
* @param apiCallPreHandler
* {@link APICallPreHandler} for retrieving EndPoint
* @return {@link HttpConfiguration} built from the passed configuration
* @throws PayPalRESTException
*/
private static HttpConfiguration createHttpConfiguration(
Map<String, String> configurationMap, HttpMethod httpMethod,
APICallPreHandler apiCallPreHandler) throws PayPalRESTException {
HttpConfiguration httpConfiguration = new HttpConfiguration();
httpConfiguration.setHttpMethod(httpMethod.toString());
String endpoint = apiCallPreHandler.getEndPoint();
if (endpoint == null || endpoint.isEmpty()) {
throw new PayPalRESTException("The endpoint could not be fetched properly. You may be missing `mode` in your configuration.");
}
httpConfiguration.setEndPointUrl(apiCallPreHandler.getEndPoint());
httpConfiguration
.setGoogleAppEngine(Boolean.parseBoolean(configurationMap
.get(Constants.GOOGLE_APP_ENGINE)));
if (Boolean.parseBoolean(configurationMap
.get((Constants.USE_HTTP_PROXY)))) {
httpConfiguration.setProxyPort(Integer.parseInt(configurationMap
.get((Constants.HTTP_PROXY_PORT))));
httpConfiguration.setProxyHost(configurationMap
.get((Constants.HTTP_PROXY_HOST)));
httpConfiguration.setProxyUserName(configurationMap
.get((Constants.HTTP_PROXY_USERNAME)));
httpConfiguration.setProxyPassword(configurationMap
.get((Constants.HTTP_PROXY_PASSWORD)));
}
httpConfiguration.setConnectionTimeout(Integer
.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_TIMEOUT)));
httpConfiguration.setMaxRetry(Integer.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_RETRY)));
httpConfiguration.setReadTimeout(Integer.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_READ_TIMEOUT)));
httpConfiguration.setMaxHttpConnection(Integer
.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_MAX_CONNECTION)));
httpConfiguration.setIpAddress(configurationMap
.get(Constants.DEVICE_IP_ADDRESS));
return httpConfiguration;
}
/**
* Returns ClientCredentials with client id and client secret from configuration Map
*
* @return Client credentials
*/
public static ClientCredentials getCredential() {
ClientCredentials credentials = new ClientCredentials();
Properties configFileProperties = getConfigFileProperties();
addConfigurations(configFileProperties);
credentials.setClientID(configurationMap.get(Constants.CLIENT_ID));
credentials.setClientSecret(configurationMap.get(Constants.CLIENT_SECRET));
return credentials;
}
/**
* @deprecated Please use static method `getCredential` instead.
*
* Returns ClientCredentials with client id and client secret from configuration Map.
*
* @return Client credentials
*/
public ClientCredentials getClientCredential() {
return PayPalResource.getCredential();
}
/**
* Fetches the properties from default configuration file.
*
* @return {@link Properties}
*/
private static Properties getConfigFileProperties() {
Properties properties = new Properties();
try {
properties.load(new FileReader(
new File(PayPalResource.class.getClassLoader().getResource(Constants.DEFAULT_CONFIGURATION_FILE).getFile())));
} catch (FileNotFoundException e) {
return null;
} catch (IOException e) {
return null;
}
return properties;
}
/**
* Merges properties object with the configuration hash map. The configuration values are given higher priority.
*
* @param properties
*/
private static void addConfigurations(Properties properties) {
if (configurationMap == null) {
configurationMap = new HashMap<String, String>();
}
if (properties != null) {
for (final String name : properties.stringPropertyNames()) {
if (!configurationMap.containsKey(name)) {
configurationMap.put(name, properties.getProperty(name));
}
}
}
}
}
| 3,920 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/QueryParameters.java
|
package com.paypal.base.rest;
import java.util.HashMap;
import java.util.Map;
/**
* <code>QueryParameters</code> holds query parameters used for retrieving
* {@link com.paypal.api.payments.PaymentHistory} object.
*/
public class QueryParameters {
/**
* Count
*/
private static final String COUNT = "count";
/**
* Start Id
*/
private static final String STARTID = "start_id";
/**
* Start Index
*/
private static final String STARTINDEX = "start_index";
/**
* Start Time
*/
private static final String STARTTIME = "start_time";
/**
* End Time
*/
private static final String ENDTIME = "end_time";
/**
* Payee Id
*/
private static final String PAYEEID = "payee_id";
/**
* Sort By
*/
private static final String SORTBY = "sort_by";
/**
* Sort Order
*/
private static final String SORTORDER = "sort_order";
// Map backing QueryParameters intended to be processed
// by the SDK library class 'RESTUtil'
private Map<String, String> containerMap;
public QueryParameters() {
containerMap = new HashMap<String, String>();
}
/**
* @return the containerMap
*/
public Map<String, String> getContainerMap() {
return containerMap;
}
/**
* Set the count
*
* @param count
* Number of items to return.
*/
public void setCount(String count) {
containerMap.put(COUNT, count);
}
/**
* Set the startId
*
* @param startId
* Resource ID that indicates the starting resource to return.
*/
public void setStartId(String startId) {
containerMap.put(STARTID, startId);
}
/**
* Set the startIndex
*
* @param startIndex
* Start index of the resources to be returned. Typically used to
* jump to a specific position in the resource history based on
* its order.
*/
public void setStartIndex(String startIndex) {
containerMap.put(STARTINDEX, startIndex);
}
/**
* Set the startTime
*
* @param startTime
* Resource creation time that indicates the start of a range of
* results.
*/
public void setStartTime(String startTime) {
containerMap.put(STARTTIME, startTime);
}
/**
* Set the endTime
*
* @param endTime
* Resource creation time that indicates the end of a range of
* results.
*/
public void setEndTime(String endTime) {
containerMap.put(ENDTIME, endTime);
}
/**
* Set the payeeId
*
* @param payeeId
* PayeeId
*/
public void setPayeeId(String payeeId) {
containerMap.put(PAYEEID, payeeId);
}
/**
* Set the sortBy
*
* @param sortBy
* Sort based on create_time or update_time.
*/
public void setSortBy(String sortBy) {
containerMap.put(SORTBY, sortBy);
}
/**
* Set the sortOrder
*
* @param sortOrder
* Sort based on order of results. Options include asc for
* ascending order or dec for descending order.
*/
public void setSortOrder(String sortOrder) {
containerMap.put(SORTORDER, sortOrder);
}
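/*
 * Illustrative usage (added; the values are arbitrary examples): the populated map is
 * what RESTUtil.formatURIPath consumes when a QueryParameters object is passed to it.
 *
 *   QueryParameters params = new QueryParameters();
 *   params.setCount("10");
 *   params.setSortBy("create_time");
 *   Map<String, String> query = params.getContainerMap(); // {count=10, sort_by=create_time}
 */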
}
| 3,921 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/HttpMethod.java
|
package com.paypal.base.rest;
/**
* HttpMethod enums used for HTTP method verbs
*/
public enum HttpMethod {
// Get Http Method
GET,
// Post Http Method
POST,
// Patch Http Method
PATCH,
// Put Http Method
PUT,
// Delete Http Method
DELETE;
}
| 3,922 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/APIContext.java
|
package com.paypal.base.rest;
import com.paypal.base.Constants;
import com.paypal.base.SDKVersion;
import java.util.Map;
import java.util.UUID;
/**
* <code>APIContext</code> wraps wire-level parameters for the API call.
* AccessToken, which is essentially an OAuth token, is treated as a mandatory
* parameter for PayPal REST APIs. RequestId is generated if not supplied, to
* mark the idempotency of the API call. The OAuth token can be generated using
* {@link OAuthTokenCredential}. The Application Header property may be used by
* clients to access application-level headers. The clients are responsible for
* casting the Application Header property to the appropriate type.
*/
public class APIContext {
/**
* Request Id
*/
private String requestId;
/**
* Parameter to mask RequestId
*/
private boolean maskRequestId;
/**
* {@link SDKVersion} instance
*/
private SDKVersion sdkVersion;
/**
* {@link OAuthTokenCredential} credential instance
*/
private OAuthTokenCredential credential;
/**
* Default Constructor
* @deprecated Please use {@link #APIContext(String, String, String)} instead.
* APIContext ideally needs more information than just accessToken to operate correctly. Now, you do not need
* to fetch accessToken from {@link OAuthTokenCredential} separately. Instead, just initialize {@link APIContext} with
* clientId, clientSecret and mode, with optional configurations, as shown below, and pass the context to paypal API methods:
* <pre>
* {@code
* APIContext context = new APIContext(clientId, clientSecret, "sandbox");
* }
* </pre>
*/
public APIContext() {
super();
this.credential = new OAuthTokenCredential(null);
}
/**
* Pass the clientID, secret and mode. The easiest, and most widely used
* option.
*
* @param clientID
* @param clientSecret
* @param mode
*/
public APIContext(String clientID, String clientSecret, String mode) {
this(clientID, clientSecret, mode, null);
}
/**
* Pass the clientID, secret and mode along with additional configurations.
*
* @param clientID
* @param clientSecret
* @param mode
* @param configurations
*/
public APIContext(String clientID, String clientSecret, String mode, Map<String, String> configurations) {
this.credential = new OAuthTokenCredential(clientID, clientSecret);
if (configurations != null && configurations.size() > 0) {
this.credential.addConfigurations(configurations);
}
this.setMode(mode);
}
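/*
 * Illustrative construction sketch (added; clientId/clientSecret are placeholders and
 * the timeout value is an arbitrary example): extra configuration entries ride along
 * with the credential and are merged with the SDK defaults at call time.
 *
 *   Map<String, String> config = new HashMap<String, String>();
 *   config.put(Constants.HTTP_CONNECTION_TIMEOUT, "5000");
 *   APIContext context = new APIContext(clientId, clientSecret, "sandbox", config);
 */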
/**
* @deprecated Please use {@link #APIContext(String, String, String)} instead.
* APIContext ideally needs more information than just accessToken to operate correctly. Now, you do not need
* to fetch accessToken from {@link OAuthTokenCredential} separately. Instead, just initialize {@link APIContext} with
* clientId, clientSecret and mode, with optional configurations, as shown below, and pass the context to paypal API methods:
* <pre>
* {@code
* APIContext context = new APIContext(clientId, clientSecret, "sandbox");
* }
* </pre>
*
* @param accessToken
* OAuthToken required for the call. OAuth token used by the REST
* API service. The token should be of the form 'Bearer xxxx..'.
* See {@link OAuthTokenCredential} to generate OAuthToken
*/
public APIContext(String accessToken) {
super();
if (accessToken == null || accessToken.length() <= 0) {
throw new IllegalArgumentException("AccessToken cannot be null");
}
this.credential = new OAuthTokenCredential(accessToken);
}
/**
* @deprecated Please use {@link #APIContext(String, String, String)} instead.
* APIContext ideally needs more information than just accessToken to operate correctly. Now, you do not need
* to fetch accessToken from {@link OAuthTokenCredential} separately. Instead, just initialize {@link APIContext} with
* clientId, clientSecret and mode, with optional configurations, as shown below, and pass the context to paypal API methods:
* <pre>
* {@code
* APIContext context = new APIContext(clientId, clientSecret, "sandbox");
* }
* </pre>
*
* @param accessToken
* OAuthToken required for the call. OAuth token used by the REST
* API service. The token should be of the form 'Bearer xxxx..'.
* See {@link OAuthTokenCredential} to generate OAuthToken
* @param requestId
* Unique requestId required for the call. Idempotency id.
* Calling setMaskRequestId(true) will override the requestId
* getter to return null, which can be used by the client (null
* check) to forcibly not send a requestId in the API call.
*/
public APIContext(String accessToken, String requestId) {
this(accessToken);
if (requestId == null || requestId.length() <= 0) {
throw new IllegalArgumentException("RequestId cannot be null");
}
this.requestId = requestId;
}
/**
* Sets refresh token to be used for third party OAuth operations. This is commonly used for
* third party invoicing and future payments.
*
* @param refreshToken
* @return {@link APIContext}
*/
public APIContext setRefreshToken(String refreshToken) {
if (this.credential != null && this.credential.hasCredentials()) {
this.credential.setRefreshToken(refreshToken);
} else {
throw new IllegalArgumentException(
"ClientID and Secret are required. Please use APIContext(String clientID, String clientSecret, String mode)");
}
return this;
}
/**
* Sets mode to either `live` or `sandbox`.
* @param mode
* @return {@link APIContext}
*/
public APIContext setMode(String mode) {
if (mode == null || !(mode.equals(Constants.LIVE) || mode.equals(Constants.SANDBOX))) {
throw new IllegalArgumentException("Mode needs to be either `sandbox` or `live`.");
}
this.credential.addConfiguration(Constants.MODE, mode);
return this;
}
/**
* Enables settings for Google App Engine. Please set to `true` if using SDK in Google App Engine.
*
* @param usingGoogleAppEngine
* @return {@link APIContext}
*/
public APIContext usingGoogleAppEngine(boolean usingGoogleAppEngine) {
return this.addConfiguration(Constants.GOOGLE_APP_ENGINE, String.valueOf(usingGoogleAppEngine));
}
/**
* Returns HTTP Headers.
*
* @return the hTTPHeaders
*/
public Map<String, String> getHTTPHeaders() {
return this.credential.getHeaders();
}
public String getHTTPHeader(String key) {
return this.credential.getHeader(key);
}
/**
* Replaces existing headers with provided one.
*
* @param httpHeaders
* the httpHeaders to set
*/
public APIContext setHTTPHeaders(Map<String, String> httpHeaders) {
this.credential.setHeaders(httpHeaders);
return this;
}
/**
* Adds HTTP Headers to existing list
*
* @param httpHeaders
* the httpHeaders to set
*/
public APIContext addHTTPHeaders(Map<String, String> httpHeaders) {
this.credential.addHeaders(httpHeaders);
return this;
}
/**
* Adds HTTP Header to existing list
*
* @param key
* @param value
*/
public APIContext addHTTPHeader(String key, String value) {
this.credential.addHeader(key, value);
return this;
}
/**
* Returns Configuration Map
*
* @return {@link Map} of configurations
*/
public Map<String, String> getConfigurationMap() {
return this.credential.getConfigurations();
}
/**
* Replaces the existing configurations with provided one
*
* @param configurationMap
* the configurationMap to set
* @return {@link APIContext}
*/
public APIContext setConfigurationMap(Map<String, String> configurationMap) {
this.credential.setConfigurations(configurationMap);
return this;
}
/**
* Adds configurations
*
* @param configurations {@link Map} of configurations.
* @return {@link APIContext}
*/
public APIContext addConfigurations(Map<String, String> configurations) {
this.credential.addConfigurations(configurations);
return this;
}
/**
* Adds configuration
*
* @param key key
* @param value value
* @return {@link APIContext}
*/
public APIContext addConfiguration(String key, String value) {
this.credential.addConfiguration(key, value);
return this;
}
/**
* Returns string value of specific configuration.
*
* @param key key
* @return {@link String} value of specific configuration.
*/
public String getConfiguration(String key) {
return this.credential.getConfiguration(key);
}
/**
* @deprecated Please use {@link #fetchAccessToken()} instead.
* Previously, this was a simple getter method. However, we enabled the feature to re-generate the access token if it is null or expired.
* This required us to throw a proper PayPalRESTException, with error information on failure.
*
* @return Access Token
*/
public String getAccessToken() {
try {
return fetchAccessToken();
} catch (PayPalRESTException ex) {
// we should be throwing proper exception here.
return null;
}
}
/**
* Returns the access Token. Regenerates if null or expired.
*
* @return {@link String} of AccessToken
* @throws PayPalRESTException
*/
public String fetchAccessToken() throws PayPalRESTException {
if (this.credential != null) {
return this.credential.getAccessToken();
}
return null;
}
/**
* Returns the unique requestId set during creation; if none is available and
* maskRequestId is set to false, returns a generated one, else returns null.
*
* @return requestId
*/
public String getRequestId() {
String reqId = null;
if (!maskRequestId) {
if (requestId == null || requestId.length() <= 0) {
requestId = UUID.randomUUID().toString();
}
reqId = requestId;
}
return reqId;
}
/**
* Sets the requestId to be sent on each request. Used for idempotency purposes.
* requestId is auto generated if not passed explicitly.
*
* @param requestId request Id
* @return APIContext
*/
public APIContext setRequestId(String requestId) {
this.requestId = requestId;
return this;
}
/**
* @param maskRequestId
* the maskRequestId to set
*/
public void setMaskRequestId(boolean maskRequestId) {
this.maskRequestId = maskRequestId;
}
/**
* @return the sdkVersion
*/
public SDKVersion getSdkVersion() {
return sdkVersion;
}
/**
* @param sdkVersion
* the sdkVersion to set
*/
public void setSdkVersion(SDKVersion sdkVersion) {
this.sdkVersion = sdkVersion;
}
/**
* @deprecated Use getHTTPHeaders() instead
* @return the headersMap
*/
public Map<String, String> getHeadersMap() {
return this.getHTTPHeaders();
}
/**
* @deprecated Please use {@link #setHTTPHeaders(Map)} or {@link #addHTTPHeaders(Map)} instead.
*
* @param headersMap
* the headersMap to set
*/
public void setHeadersMap(Map<String, String> headersMap) {
this.setHTTPHeaders(headersMap);
}
public String getClientID() {
if (this.credential == null) {
throw new IllegalArgumentException(
"ClientID and Secret are required. Please use APIContext(String clientID, String clientSecret, String mode)");
}
return this.credential.getClientID();
}
public String getClientSecret() {
if (this.credential == null) {
throw new IllegalArgumentException(
"ClientID and Secret are required. Please use APIContext(String clientID, String clientSecret, String mode)");
}
return this.credential.getClientSecret();
}
}
| 3,923 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/RESTUtil.java
|
package com.paypal.base.rest;
import com.paypal.api.openidconnect.CreateFromAuthorizationCodeParameters;
import com.paypal.api.openidconnect.CreateFromRefreshTokenParameters;
import com.paypal.api.openidconnect.UserinfoParameters;
import com.paypal.base.Constants;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
* <code>RESTUtil</code> acts as utility class used by REST API system
*/
public final class RESTUtil {
private RESTUtil() {
}
/**
* Formats the URI path for REST calls.
*
* @param pattern
* URI pattern with place holders for replacement strings
* @param parameters
* Replacement objects
* @return Formatted URI path
*/
public static String formatURIPath(String pattern, Object[] parameters) {
String formattedPath = null;
Object[] finalParameters = null;
if (pattern != null) {
if (parameters != null
&& parameters.length == 1
&& parameters[0] instanceof CreateFromAuthorizationCodeParameters) {
// Form an object array using the passed
// CreateFromAuthorizationCodeParameters
finalParameters = splitParameters(pattern,
((CreateFromAuthorizationCodeParameters) parameters[0])
.getContainerMap());
} else if (parameters != null
&& parameters.length == 1
&& parameters[0] instanceof CreateFromRefreshTokenParameters) {
// Form an object array using the passed
// CreateFromRefreshTokenParameters
finalParameters = splitParameters(pattern,
((CreateFromRefreshTokenParameters) parameters[0])
.getContainerMap());
} else if (parameters != null && parameters.length == 1
&& parameters[0] instanceof UserinfoParameters) {
// Form an object array using the passed UserinfoParameters
finalParameters = splitParameters(pattern,
((UserinfoParameters) parameters[0]).getContainerMap());
} else if (parameters != null && parameters.length == 1
&& parameters[0] instanceof QueryParameters) {
// Form an object array using the passed QueryParameters
finalParameters = splitParameters(pattern,
((QueryParameters) parameters[0]).getContainerMap());
} else if (parameters != null && parameters.length == 1
&& parameters[0] instanceof Map<?, ?>) {
// Form an object array using the passed Map
finalParameters = splitParameters(pattern,
((Map<?, ?>) parameters[0]));
} else {
finalParameters = parameters;
}
// Perform a simple message formatting
String fString = MessageFormat.format(pattern, finalParameters);
// Process the resultant string for removing nulls
formattedPath = removeNullsInQS(fString);
}
return formattedPath;
}
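/*
 * Worked example (added for illustration; the pattern and values are arbitrary):
 * MessageFormat fills the indexed placeholders, then removeNullsInQS drops the
 * parameters that formatted to the literal string "null".
 *
 *   RESTUtil.formatURIPath("v1/some/resource?count={0}&start_id={1}",
 *           new Object[] { "10", null });
 *   // yields "v1/some/resource?count=10"
 */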
/**
* Formats the URI path for REST calls. Replaces any occurrences of the form
* {name} in pattern with the corresponding value of key name in the passed
* {@link Map}
*
* @param pattern
* URI pattern with named place holders
* @param pathParameters
* Parameter {@link Map}
* @return Processed URI path
* @throws PayPalRESTException
*/
public static String formatURIPath(String pattern,
Map<String, String> pathParameters) throws PayPalRESTException {
return formatURIPath(pattern, pathParameters, new HashMap<String, String>());
}
/**
* Formats the URI path for REST calls. Replaces any occurrences of the form
* {name} in pattern with the corresponding value of key name in the passed
* {@link Map}. Query parameters are appended to the end of the URI path
*
* @param pattern
* URI pattern with named place holders
* @param queryParameters
* Query parameters {@link Map}
* @param pathParameters
* Parameter {@link String...}
* @return Processed URI path
* @throws PayPalRESTException
*/
public static String formatURIPath(String pattern, Map<String, String> queryParameters, String... pathParameters)
throws PayPalRESTException {
Map<String, String> pathParams = new HashMap<String, String>();
if (pathParameters != null) {
for (int i = 0; i < pathParameters.length; i++) {
pathParams.put(String.valueOf(i), pathParameters[i]);
}
}
return formatURIPath(pattern, pathParams, queryParameters);
}
/**
* Formats the URI path for REST calls. Replaces any occurrences of the form
* {name} in pattern with the corresponding value of key name in the passed
* {@link Map}. Query parameters are appended to the end of the URI path
*
* @param pattern
* URI pattern with named place holders
* @param pathParameters
* Parameter {@link Map}
* @param queryParameters
* Query parameters {@link Map}
* @return Processed URI path
* @throws PayPalRESTException
*/
public static String formatURIPath(String pattern,
Map<String, String> pathParameters,
Map<String, String> queryParameters) throws PayPalRESTException {
String formattedURIPath = null;
if (pattern != null && pattern.trim().length() > 0
&& pathParameters != null && pathParameters.size() > 0) {
for (Entry<String, String> entry : pathParameters.entrySet()) {
String placeHolderName = "{" + entry.getKey().trim() + "}";
if (pattern.contains(placeHolderName)) {
pattern = pattern.replace(placeHolderName, entry.getValue()
.trim());
}
}
}
formattedURIPath = pattern;
if (queryParameters != null && queryParameters.size() > 0) {
StringBuilder stringBuilder = new StringBuilder(formattedURIPath);
if (stringBuilder.toString().contains("?")) {
if (!(stringBuilder.toString().endsWith("?")
|| stringBuilder.toString().endsWith("&"))) {
stringBuilder.append("&");
}
} else {
stringBuilder.append("?");
}
for (Entry<String, String> entry : queryParameters.entrySet()) {
try {
stringBuilder
.append(URLEncoder.encode(entry.getKey(), "UTF-8"))
.append("=")
.append(URLEncoder.encode(entry.getValue(), "UTF-8"))
.append("&");
} catch (UnsupportedEncodingException e) {
					// UTF-8 is always supported, so this should not occur; the parameter is skipped if it does
}
}
formattedURIPath = stringBuilder.toString();
}
if (formattedURIPath.contains("{") || formattedURIPath.contains("}")) {
throw new PayPalRESTException("Unable to formatURI Path : "
+ formattedURIPath
+ ", unable to replace placeholders with the map : "
+ pathParameters);
}
return formattedURIPath;
}
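	/*
	 * A minimal usage sketch for the path and query formatting above. The
	 * enclosing utility class is assumed to expose these helpers statically
	 * (it is referred to as RESTUtil here), and the placeholder, parameter
	 * names and values are illustrative only:
	 *
	 *   Map<String, String> pathParams = new HashMap<String, String>();
	 *   pathParams.put("payment-id", "PAY-123");
	 *   Map<String, String> queryParams = new HashMap<String, String>();
	 *   queryParams.put("count", "10");
	 *   String path = RESTUtil.formatURIPath(
	 *           "v1/payments/payment/{payment-id}", pathParams, queryParams);
	 *   // path -> "v1/payments/payment/PAY-123?count=10&"
	 *   // (the trailing '&' is left as-is by this overload)
	 */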
/**
* Remove null parameters from query string
*
* @param fString
* Formatted String
* @return Nulls removed query string
*/
private static String removeNullsInQS(String fString) {
String formattedString = fString;
if (fString != null && fString.length() != 0) {
String[] parts = fString.split("\\?");
// Process the query string part
if (parts.length == 2) {
String queryString = parts[1];
String[] querys = queryString.split("&");
if (querys.length > 0) {
StringBuilder strBuilder = new StringBuilder();
for (String query : querys) {
String[] valueSplit = query.split("=");
if (valueSplit.length == 2) {
if ("null".equalsIgnoreCase(valueSplit[1].trim())) {
continue;
} else if ("".equals(valueSplit[1].trim())) {
continue;
} else {
strBuilder.append(query).append("&");
}
} else if (valueSplit.length < 2) {
continue;
}
}
formattedString = (!strBuilder.toString().endsWith("&")) ? strBuilder
.toString() : strBuilder.toString().substring(0,
strBuilder.toString().length() - 1);
}
// append the query string delimiter
formattedString = (parts[0].trim() + "?") + formattedString;
}
}
return formattedString;
}
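	/*
	 * Illustrative input/output for the null-stripping above (the path and
	 * query values are made up):
	 *
	 *   removeNullsInQS("v1/invoices/search?page=1&size=null&email=")
	 *   // -> "v1/invoices/search?page=1"
	 *
	 * Query entries whose value is the literal string "null" or empty are
	 * dropped and the remaining parameters are re-joined with '&'.
	 */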
/**
	 * Split the URI and form an Object array using the query string and values
	 * in the provided map. The returned object array is populated only if the
	 * map contains a valid value for the query name; positions for query names
	 * with no matching value in the map are set to null
*
* @param pattern
* URI pattern
* @param containerMap
* Map containing the query name and value
* @return Object array
*/
private static Object[] splitParameters(String pattern,
Map<?, ?> containerMap) {
List<Object> objectList = new ArrayList<Object>();
String[] query = pattern.split("\\?");
if (query != null && query.length == 2 && query[1].contains("={")) {
String[] queries = query[1].split("&");
if (queries != null) {
for (String q : queries) {
String[] params = q.split("=");
if (params != null && params.length == 2) {
String key = params[0].trim();
if (containerMap.containsKey(key)) {
Object object = containerMap.get(key);
try {
objectList.add(URLEncoder.encode(
(String) object, Constants.ENCODING_FORMAT));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} else {
objectList.add(null);
}
}
}
}
}
return objectList.toArray();
}
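	/*
	 * A small sketch of the splitting above; the pattern and the map entries
	 * are illustrative, not actual endpoint definitions:
	 *
	 *   pattern:      "v1/identity/userinfo?schema={0}&access_token={1}"
	 *   containerMap: { "schema" -> "openid" }   // no "access_token" entry
	 *   result:       [ "openid", null ]
	 *
	 * Each query name found in the pattern is looked up in the map; names
	 * without a value yield null entries, which MessageFormat renders as the
	 * literal "null" that removeNullsInQS later strips out.
	 */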
}
| 3,924 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/AccessToken.java
|
package com.paypal.base.rest;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.experimental.Accessors;
@Getter
@AllArgsConstructor
@EqualsAndHashCode
@Accessors(chain = true)
public class AccessToken {
private String accessToken;
private long expires = 0;
/**
* Specifies how long this token can be used for placing API calls. The
* remaining lifetime is given in seconds.
*
* @return remaining lifetime of this access token in seconds
*/
public long expiresIn() {
return expires - new java.util.Date().getTime() / 1000;
}
}
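/*
 * A minimal sketch of how the expiry bookkeeping above is used (the token
 * string and lifetime are made-up values):
 *
 *   long now = new java.util.Date().getTime() / 1000;   // epoch seconds
 *   AccessToken token = new AccessToken("Bearer A21AA...", now + 32400);
 *   long remaining = token.expiresIn();                  // roughly 32400
 *
 * The "expires" field stores the absolute expiry time in epoch seconds,
 * while expiresIn() reports the lifetime remaining relative to the clock.
 */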
| 3,925 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/OAuthTokenCredential.java
|
package com.paypal.base.rest;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import com.paypal.base.*;
import com.paypal.base.codec.binary.Base64;
import com.paypal.base.exception.ClientActionRequiredException;
import com.paypal.base.exception.HttpErrorException;
import com.paypal.base.sdk.info.SDKVersionImpl;
import com.paypal.base.util.UserAgentHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * OAuthTokenCredential is used for generation of the OAuth Token used by the
 * PayPal REST API service. ClientID and ClientSecret are required by the class
 * to generate an OAuth Token; the resulting token is of the form "Bearer
 * xxxxxx". The class has two constructors, one of them taking an additional
 * configuration map used for dynamic configuration. When using the constructor
 * without a configuration map the endpoint is fetched from the configuration
 * that is used during initialization. See {@link PayPalResource} for
 * configuring the system. When using a configuration map the class expects an
 * entry by the name "oauth.EndPoint" or "service.EndPoint" to retrieve the
 * value of the endpoint for the OAuth Service. If neither is present the
 * configuration should have an entry by the name "mode" with value sandbox or
 * live, in which case the corresponding default PayPal endpoints are used.
*/
public final class OAuthTokenCredential {
private static final Logger log = LoggerFactory.getLogger(OAuthTokenCredential.class);
private static Map<String, AccessToken> ACCESS_TOKENS = new ConcurrentHashMap<String, AccessToken>();
/**
* OAuth URI path parameter
*/
private static String OAUTH_TOKEN_PATH = "/v1/oauth2/token";
/**
* Client ID for OAuth
*/
private String clientID;
/**
* Client Secret for OAuth
*/
private String clientSecret;
/**
* Headers
*/
private Map<String, String> headers = new HashMap<String, String>();
private String refreshToken;
/**
* @deprecated This field is only used when OAuthTokenCredential is initialized with access token only.
* If the access token is directly passed in as a part of constructor, we will always pass this back.
*/
@Deprecated
private final AccessToken __accessToken;
/**
* Map used for dynamic configuration
*/
private Map<String, String> configurationMap;
/**
* {@link SDKVersion} instance
*/
private SDKVersion sdkVersion;
/**
* Sets the URI path for the OAuth Token service. If not set it defaults to
* "/v1/oauth2/token"
*
* @param oauthTokenPath
* the URI part to set
*/
public static void setOAUTH_TOKEN_PATH(String oauthTokenPath) {
OAUTH_TOKEN_PATH = oauthTokenPath;
}
/**
* Constructor that takes in Access Token. Only used internally. Please do not use for external integrations.
*
* @param accessToken
*/
@Deprecated
OAuthTokenCredential(String accessToken) {
__accessToken = new AccessToken(accessToken, 1);
}
/**
* Pass clientId and secret to OAuthTokenCredential.
*
* @param clientID
* Client ID for the OAuth
* @param clientSecret
* Client Secret for OAuth
*/
public OAuthTokenCredential(String clientID, String clientSecret) {
super();
this.clientID = clientID;
this.clientSecret = clientSecret;
this.configurationMap = SDKUtil.combineDefaultMap(ConfigManager.getInstance().getConfigurationMap());
this.sdkVersion = new SDKVersionImpl();
this.__accessToken = null;
}
/**
* Configuration Constructor for dynamic configuration
*
* @param clientID
* Client ID for the OAuth
* @param clientSecret
* Client Secret for OAuth
* @param configurationMap
* Dynamic configuration map which should have an entry for
	 *            'oauth.EndPoint' or 'service.EndPoint'. If neither is
	 *            present then there should be an entry for 'mode' with value
	 *            sandbox/live, in which case PayPal's default endpoints are used.
*/
public OAuthTokenCredential(String clientID, String clientSecret, Map<String, String> configurationMap) {
super();
this.clientID = clientID;
this.clientSecret = clientSecret;
this.configurationMap = SDKUtil.combineDefaultMap(configurationMap);
this.sdkVersion = new SDKVersionImpl();
this.__accessToken = null;
}
/**
* Sets refresh token to be used for third party OAuth operations. This is commonly used for
* third party invoicing and future payments.
* This method is for internal use only. Please use {@link APIContext#setRefreshToken(String)} for your integration needs.
*
* @param refreshToken
* @return {@link OAuthTokenCredential}
*/
synchronized OAuthTokenCredential setRefreshToken(String refreshToken) {
if (!this.hasCredentials()) {
throw new IllegalArgumentException("ClientID and Secret are required. Please use OAuthTokenCredential(String clientID, String clientSecret)");
}
this.refreshToken = refreshToken;
return this;
}
/**
* Checks if clientID and secret are set.
*
* @return {@link Boolean}
*/
public boolean hasCredentials() {
return (this.clientID != null) && (this.clientSecret != null);
}
/**
	 * Sets the headers used for every call.
*
* @param headers
* @return {@link OAuthTokenCredential}
*/
public OAuthTokenCredential setHeaders(Map<String, String> headers) {
this.headers = headers;
return this;
}
/**
* Adds headers.
*
* @param headers
* @return {@link OAuthTokenCredential}
*/
public OAuthTokenCredential addHeaders(Map<String, String> headers) {
this.headers.putAll(headers);
return this;
}
/**
* Adds header to existing list of headers.
*
* @param key
* @param value
* @return {@link OAuthTokenCredential}
*/
public OAuthTokenCredential addHeader(String key, String value) {
this.headers.put(key, value);
return this;
}
/**
* Returns the list of headers
*
* @return {@link Map} of headers
*/
public Map<String, String> getHeaders() {
return this.headers;
}
/**
* Returns the header value
*
* @return {@link String} value of header
*/
public String getHeader(String key) {
return this.headers.get(key);
}
/**
* Computes Access Token by placing a call to OAuth server using ClientID
	 * and ClientSecret. The returned token is prefixed with its token type (Bearer).
*
* @return the accessToken
* @throws PayPalRESTException
*/
public String getAccessToken() throws PayPalRESTException {
if (__accessToken != null) {
return __accessToken.getAccessToken();
}
synchronized (ACCESS_TOKENS) {
if (isRegenerationRequired()) {
generateAccessToken();
}
return ACCESS_TOKENS.get(getCacheTokenKey()).getAccessToken();
}
}
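	/*
	 * A minimal usage sketch for token generation; the credentials and the
	 * configuration values are placeholders:
	 *
	 *   Map<String, String> config = new HashMap<String, String>();
	 *   config.put("mode", "sandbox");
	 *   OAuthTokenCredential credential =
	 *           new OAuthTokenCredential("<clientId>", "<clientSecret>", config);
	 *   String token = credential.getAccessToken();   // e.g. "Bearer xxxxxx"
	 *
	 * The generated token is cached in ACCESS_TOKENS, keyed by
	 * clientID:clientSecret:refreshToken, and is regenerated only after it
	 * expires.
	 */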
/**
* Checks if the access token is expired or null.
*
* @return true if expired or null, and can be regenerated.
* false otherwise
*/
private boolean isRegenerationRequired() {
AccessToken token = ACCESS_TOKENS.get(getCacheTokenKey());
if (token == null) {
return hasCredentials();
} else {
return (token.getAccessToken() == null || (hasCredentials() && token.expiresIn() <= 0));
}
}
/**
	 * Computes the Basic Authorization header by Base64 encoding the ClientID
	 * and ClientSecret. The encoded value is prefixed with the String "Basic ".
*
* @return the accessToken
* @throws PayPalRESTException
*/
public String getAuthorizationHeader() throws PayPalRESTException {
String base64EncodedString = generateBase64String(clientID + ":" + clientSecret);
return "Basic " + base64EncodedString;
}
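	/*
	 * Worked example of the Basic header computation above (dummy
	 * credentials):
	 *
	 *   clientID     = "myId"
	 *   clientSecret = "mySecret"
	 *   Base64("myId:mySecret") = "bXlJZDpteVNlY3JldA=="
	 *   getAuthorizationHeader() -> "Basic bXlJZDpteVNlY3JldA=="
	 */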
/**
* Returns clientID
*
* @return {@link String} containing clientID
*/
public String getClientID() {
return this.clientID;
}
/**
* Returns clientSecret
*
* @return {@link String} containing clientSecret
*/
public String getClientSecret() {
return this.clientSecret;
}
/**
* Specifies how long this token can be used for placing API calls. The
* remaining lifetime is given in seconds.
*
* @return remaining lifetime of this access token in seconds
*/
public long expiresIn() {
AccessToken token = ACCESS_TOKENS.get(getCacheTokenKey());
		return token != null ? token.expiresIn() : -1;
}
/**
* Adds configuration to list of configurations.
*
* @param key
* @param value
* @return {@link OAuthTokenCredential}
*/
public OAuthTokenCredential addConfiguration(String key, String value) {
if (this.configurationMap == null) {
this.configurationMap = new HashMap<String, String>();
}
this.configurationMap.put(key, value);
return this;
}
/**
* Adds configurations to list of configurations.
* @param configurations
* @return {@link OAuthTokenCredential}
*/
public OAuthTokenCredential addConfigurations(Map<String, String> configurations) {
if (this.configurationMap == null) {
this.configurationMap = new HashMap<String, String>();
}
this.configurationMap.putAll(configurations);
return this;
}
/**
* Replaces existing configurations with provided map of configurations.
*
* @param configurations
* @return {@link OAuthTokenCredential}
*/
public OAuthTokenCredential setConfigurations(Map<String, String> configurations) {
this.configurationMap = configurations;
return this;
}
/**
* Returns list of configurations.
*
* @return {@link Map} of configurations
*/
public Map<String, String> getConfigurations() {
return this.configurationMap;
}
/**
* Returns specific configuration.
*
* @param key
* @return {@link String} value of configuration
*/
public String getConfiguration(String key) {
if (this.configurationMap != null) {
return this.configurationMap.get(key);
}
return null;
}
private synchronized void generateAccessToken() throws PayPalRESTException {
HttpConnection connection;
HttpConfiguration httpConfiguration;
String base64ClientID = generateBase64String(clientID + ":" + clientSecret);
try {
connection = ConnectionManager.getInstance().getConnection();
httpConfiguration = getOAuthHttpConfiguration();
connection.createAndconfigureHttpConnection(httpConfiguration);
this.headers.put(Constants.AUTHORIZATION_HEADER, "Basic " + base64ClientID);
// Accept only json output
this.headers.put(Constants.HTTP_ACCEPT_HEADER, Constants.HTTP_CONTENT_TYPE_JSON);
this.headers.put(Constants.HTTP_CONTENT_TYPE_HEADER, Constants.HTTP_CONFIG_DEFAULT_CONTENT_TYPE);
UserAgentHeader userAgentHeader = new UserAgentHeader(sdkVersion != null ? sdkVersion.getSDKId() : null,
sdkVersion != null ? sdkVersion.getSDKVersion() : null);
this.headers.putAll(userAgentHeader.getHeader());
String postRequest = getRequestPayload();
// log request
String mode = configurationMap.get(Constants.MODE);
if (Constants.LIVE.equalsIgnoreCase(mode) && log.isDebugEnabled()) {
log.warn("Log level cannot be set to DEBUG in " + Constants.LIVE
+ " mode. Skipping request/response logging...");
}
if (!Constants.LIVE.equalsIgnoreCase(mode)) {
log.debug("request header: " + this.headers.toString());
log.debug("request body: " + postRequest);
}
// send request and get & log response
String jsonResponse = connection.execute("", postRequest, this.headers);
if (!Constants.LIVE.equalsIgnoreCase(mode)) {
log.debug("response header: " + connection.getResponseHeaderMap().toString());
log.debug("response: " + jsonResponse);
}
// parse response as JSON object
JsonParser parser = new JsonParser();
JsonElement jsonElement = parser.parse(jsonResponse);
String accessToken = jsonElement.getAsJsonObject().get("token_type").getAsString() + " "
+ jsonElement.getAsJsonObject().get("access_token").getAsString();
// Save expiry date
long tokenLifeTime = jsonElement.getAsJsonObject().get("expires_in").getAsLong();
long expires = new Date().getTime() / 1000 + tokenLifeTime;
ACCESS_TOKENS.put(getCacheTokenKey(), new AccessToken(accessToken, expires));
} catch (ClientActionRequiredException e) {
throw PayPalRESTException.createFromHttpErrorException(e);
} catch (HttpErrorException e) {
throw PayPalRESTException.createFromHttpErrorException(e);
} catch (Exception e) {
throw new PayPalRESTException(e.getMessage(), e);
} finally {
// Replace the headers back to JSON for any future use.
this.headers.put(Constants.HTTP_CONTENT_TYPE_HEADER, Constants.HTTP_CONTENT_TYPE_JSON);
}
}
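	/*
	 * Shape of the token endpoint response consumed above (the values are
	 * illustrative):
	 *
	 *   {
	 *     "token_type": "Bearer",
	 *     "access_token": "A21AA...",
	 *     "expires_in": 32400
	 *   }
	 *
	 * The cached token becomes "Bearer A21AA..." and its absolute expiry is
	 * recorded as the current epoch time in seconds plus expires_in.
	 */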
private String getCacheTokenKey() {
return clientID + ":" + clientSecret + ":" + refreshToken;
}
/*
* Generate a Base64 encoded String from clientID & clientSecret
*/
private String generateBase64String(String clientCredentials) throws PayPalRESTException {
String base64ClientID = null;
byte[] encoded = null;
try {
encoded = Base64.encodeBase64(clientCredentials.getBytes("UTF-8"));
base64ClientID = new String(encoded, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new PayPalRESTException(e.getMessage(), e);
}
return base64ClientID;
}
/**
* Returns the request payload for OAuth Service. Override this method to
* alter the payload
*
* @return Payload as String
*/
protected String getRequestPayload() {
if (this.refreshToken != null) {
return String.format("grant_type=refresh_token&refresh_token=%s", this.refreshToken);
} else {
return "grant_type=client_credentials";
}
}
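	/*
	 * Resulting payload shapes (the refresh token value is a placeholder):
	 *
	 *   without refresh token: "grant_type=client_credentials"
	 *   with refresh token:    "grant_type=refresh_token&refresh_token=<refreshToken>"
	 */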
/*
* Get HttpConfiguration object for OAuth server
*/
protected HttpConfiguration getOAuthHttpConfiguration() throws MalformedURLException {
HttpConfiguration httpConfiguration = new HttpConfiguration();
httpConfiguration
.setHttpMethod(Constants.HTTP_CONFIG_DEFAULT_HTTP_METHOD);
/*
		 * Resolve the endpoint in this order: the 'oauth.EndPoint' property,
		 * then the 'mode' property (defaulting to the PayPal sandbox or live
		 * endpoint), then the 'service.EndPoint' property. Throw an exception
		 * if none of these yields an endpoint
*/
final String mode = this.configurationMap.get(Constants.MODE);
// Default to Endpoint param.
String endPointUrl = this.configurationMap.get(Constants.OAUTH_ENDPOINT);
if (endPointUrl == null || endPointUrl.trim().isEmpty()) {
if (Constants.SANDBOX.equalsIgnoreCase(mode)) {
endPointUrl = Constants.REST_SANDBOX_ENDPOINT;
} else if (Constants.LIVE.equalsIgnoreCase(mode)) {
endPointUrl = Constants.REST_LIVE_ENDPOINT;
} else if (endPointUrl == null || endPointUrl.length() <= 0) {
// Default to Normal endpoint
endPointUrl = this.configurationMap.get(Constants.ENDPOINT);
}
}
// If none of the option works, throw exception.
if (endPointUrl == null || endPointUrl.trim().length() <= 0) {
			throw new MalformedURLException(
					"oauth.EndPoint or service.EndPoint not set (OR) mode not configured to sandbox/live");
}
if (Boolean
.parseBoolean(configurationMap.get(Constants.USE_HTTP_PROXY))) {
httpConfiguration.setProxySet(true);
httpConfiguration.setProxyHost(configurationMap
.get(Constants.HTTP_PROXY_HOST));
httpConfiguration.setProxyPort(Integer.parseInt(configurationMap
.get(Constants.HTTP_PROXY_PORT)));
String proxyUserName = configurationMap
.get(Constants.HTTP_PROXY_USERNAME);
String proxyPassword = configurationMap
.get(Constants.HTTP_PROXY_PASSWORD);
if (proxyUserName != null && proxyPassword != null) {
httpConfiguration.setProxyUserName(proxyUserName);
httpConfiguration.setProxyPassword(proxyPassword);
}
}
endPointUrl = (endPointUrl.endsWith("/")) ? endPointUrl.substring(0,
endPointUrl.length() - 1) : endPointUrl;
endPointUrl += OAUTH_TOKEN_PATH;
httpConfiguration.setEndPointUrl(endPointUrl);
httpConfiguration
.setGoogleAppEngine(Boolean.parseBoolean(configurationMap
.get(Constants.GOOGLE_APP_ENGINE)));
httpConfiguration.setConnectionTimeout(Integer
.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_TIMEOUT)));
httpConfiguration.setMaxRetry(Integer.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_RETRY)));
httpConfiguration.setReadTimeout(Integer.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_READ_TIMEOUT)));
httpConfiguration.setMaxHttpConnection(Integer
.parseInt(configurationMap
.get(Constants.HTTP_CONNECTION_MAX_CONNECTION)));
httpConfiguration.setIpAddress(configurationMap
.get(Constants.DEVICE_IP_ADDRESS));
return httpConfiguration;
}
}
| 3,926 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/PayPalRESTException.java
|
package com.paypal.base.rest;
import com.google.gson.Gson;
import com.paypal.api.payments.Error;
import com.paypal.base.exception.HttpErrorException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* PayPalException handles all exceptions related to REST services
*/
public class PayPalRESTException extends Exception {
private static final Logger log = LoggerFactory
.getLogger(PayPalRESTException.class);
/**
* Serial Version ID
*/
private static final long serialVersionUID = 1L;
/**
* If source is {@link HttpErrorException},
* exception's response code value is copied
*/
private int responsecode;
/**
	 * If source is {@link HttpErrorException} and response code is 400 or above,
* error response content is converted to {@link Error} object
*/
private Error details;
public PayPalRESTException(String message) {
super(message);
}
public PayPalRESTException(String message, Throwable throwable) {
super(message, throwable);
}
public PayPalRESTException(Throwable throwable) {
super(throwable);
}
public int getResponsecode() {
return responsecode;
}
public void setResponsecode(int responsecode) {
this.responsecode = responsecode;
}
public Error getDetails() {
return details;
}
public void setDetails(Error details) {
this.details = details;
}
/**
* Utility method that creates a {@link PayPalRESTException} object from {@link HttpErrorException}.
	 * If the {@link HttpErrorException} carries a response code of 400 or above, the error response is converted to an {@link Error} object.
*
* @param httpErrorException
* {@link HttpErrorException} thrown from API call
* @return PayPalRESTException
*/
protected static PayPalRESTException createFromHttpErrorException(HttpErrorException httpErrorException){
PayPalRESTException ppre = new PayPalRESTException(httpErrorException.getMessage(), httpErrorException);
ppre.setResponsecode(httpErrorException.getResponsecode());
if( httpErrorException.getResponsecode() >= 400 && httpErrorException.getErrorResponse()!=null && isJSONValid(httpErrorException.getErrorResponse())) {
try{
Error details = JSONFormatter.fromJSON(httpErrorException.getErrorResponse(), Error.class);
ppre.setDetails(details);
} catch(Exception e){
log.error("Exception thrown while parsing error response: " + httpErrorException.getErrorResponse() , e);
}
}
return ppre;
}
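	/*
	 * Typical handling on the caller side; the REST call in the try block is
	 * illustrative and the logger is assumed to be the caller's own:
	 *
	 *   try {
	 *       // some REST call made through the SDK
	 *   } catch (PayPalRESTException e) {
	 *       int status = e.getResponsecode();   // e.g. 400
	 *       Error details = e.getDetails();     // parsed error body, may be null
	 *       logger.error("PayPal call failed: " + e);
	 *   }
	 */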
public String toString() {
return "response-code: " + this.responsecode + "\tdetails: " + this.details;
}
private static boolean isJSONValid(String jsonInString) {
try {
new Gson().fromJson(jsonInString, Object.class);
return true;
} catch(com.google.gson.JsonSyntaxException ex) {
return false;
}
}
}
| 3,927 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/JSONFormatter.java
|
package com.paypal.base.rest;
import com.google.gson.FieldNamingPolicy;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
/**
* JSONFormatter converts objects to JSON representation and vice-versa. This
* class depends on Google's GSON library to do the transformation. This class
* is not thread-safe.
*/
public final class JSONFormatter {
/*
* JSONFormatter is coupled to the stubs generated using the SDK generator.
* Since PayPal REST APIs support only JSON, this class is bound to the
* stubs for their json representation.
*/
private JSONFormatter() {
}
/**
* FieldNamingPolicy used by the underlying Gson library. Alter this
	 * property to set a field naming policy other than
* LOWER_CASE_WITH_UNDERSCORES used by PayPal REST APIs
*/
private static FieldNamingPolicy FIELD_NAMING_POLICY = FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES;
/**
* Gson
*/
public static Gson GSON = new GsonBuilder().setPrettyPrinting()
.setFieldNamingPolicy(FIELD_NAMING_POLICY).create();
/**
* Set a format for gson FIELD_NAMING_POLICY. See {@link FieldNamingPolicy}
*
* @param FIELD_NAMING_POLICY
*/
public static final void setFIELD_NAMING_POLICY(
FieldNamingPolicy FIELD_NAMING_POLICY) {
GSON = new GsonBuilder().setPrettyPrinting()
.setFieldNamingPolicy(FIELD_NAMING_POLICY).create();
}
/**
* Converts a Raw Type to JSON String
*
* @param <T>
* Type to be converted
* @param t
* Object of the type
* @return JSON representation
*/
public static <T> String toJSON(T t) {
return GSON.toJson(t);
}
/**
* Converts a JSON String to object representation
*
* @param <T>
* Type to be converted
* @param responseString
* JSON representation
* @param clazz
* Target class
* @return Object of the target type
*/
public static <T> T fromJSON(String responseString, Class<T> clazz) {
T t = null;
if (clazz.isAssignableFrom(responseString.getClass())) {
t = clazz.cast(responseString);
} else {
t = GSON.fromJson(responseString, clazz);
}
return t;
}
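	/*
	 * Round-trip sketch; the Invoice type and its fields stand in for any
	 * stub whose JSON uses LOWER_CASE_WITH_UNDERSCORES field naming:
	 *
	 *   Invoice invoice = ...;
	 *   String json = JSONFormatter.toJSON(invoice);
	 *   Invoice copy = JSONFormatter.fromJSON(json, Invoice.class);
	 *
	 * If the target class is String itself, fromJSON simply casts and returns
	 * the input instead of parsing it.
	 */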
}
| 3,928 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/PayPalModel.java
|
package com.paypal.base.rest;
import lombok.EqualsAndHashCode;
@EqualsAndHashCode
public class PayPalModel {
/**
* Returns a JSON string corresponding to object state
*
* @return JSON representation
*/
public String toJSON() {
return JSONFormatter.toJSON(this);
}
@Override
public String toString() {
return toJSON();
}
}
| 3,929 |
0 |
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base
|
Create_ds/PayPal-Java-SDK/rest-api-sdk/src/main/java/com/paypal/base/rest/RESTAPICallPreHandler.java
|
package com.paypal.base.rest;
import com.paypal.base.APICallPreHandler;
import com.paypal.base.Constants;
import com.paypal.base.SDKUtil;
import com.paypal.base.SDKVersion;
import com.paypal.base.codec.binary.Base64;
import com.paypal.base.exception.ClientActionRequiredException;
import com.paypal.base.util.UserAgentHeader;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* RESTApiCallPreHandler acts as a {@link APICallPreHandler} for REST API calls.
* The implementation is PayPal specific, To do custom implementation override
* the protected methods
*/
public class RESTAPICallPreHandler implements APICallPreHandler {
/*
* RESTApiCallPreHandler requires a configuration system to function
* properly. The configuration is initialized to default in PayPalResource
	 * class if no initConfig(...) method was invoked before
	 * making the API call. Users can override this default file
	 * 'sdk_config.properties' by choosing a different version of
	 * initConfig(...) and passing their custom configuration.
* Initializing to default means the system looks for a file specifically
* named 'sdk_config.properties' in the classpath and reads the
* configuration from there. 'Dynamic Configuration' enables the users to
* pass custom configuration (per call basis) as a Map object to override
* the default behavior for the system to function. For Dynamic
* configuration to take effect create a Map of custom configuration and set
* it in APIContext object, choose the overloaded method of the Resource
* class that takes APIContext object as a parameter and pass the APIContext
* object.
*/
/**
* Configuration Map used for dynamic configuration
*/
private Map<String, String> configurationMap = null;
/**
* Base URL for the service
*/
private URL url;
/**
* Authorization token
*/
private String authorizationToken;
/**
* Resource URI as defined in the WSDL
*/
private String resourcePath;
/**
* Request Id
*/
private String requestId;
/**
* Custom headers Map
*/
private Map<String, String> headersMap;
/**
* Request Payload
*/
private String payLoad;
/**
* {@link SDKVersion} instance
*/
private SDKVersion sdkVersion;
/**
* Constructor using configurations dynamically
*
* @param configurationMap
* Map used for dynamic configuration
*/
public RESTAPICallPreHandler(Map<String, String> configurationMap) {
this.configurationMap = SDKUtil.combineDefaultMap(configurationMap);
}
/**
* Constructor using a Map of headers for forming custom headers
*
* @param configurationMap
* Map used for dynamic configuration
* @param headersMap
* Headers Map
*/
public RESTAPICallPreHandler(Map<String, String> configurationMap,
Map<String, String> headersMap) {
this(configurationMap);
this.headersMap = (headersMap == null) ? Collections
.<String, String> emptyMap() : headersMap;
}
/**
* @param authorizationToken
* the authorizationToken to set
*/
public void setAuthorizationToken(String authorizationToken) {
this.authorizationToken = authorizationToken;
}
/**
* @param resourcePath
* the resourcePath to set
*/
public void setResourcePath(String resourcePath) {
this.resourcePath = resourcePath;
}
/**
* @param requestId
* the requestId to set
*/
public void setRequestId(String requestId) {
this.requestId = requestId;
}
/**
* @param payLoad
* the payLoad to set
*/
public void setPayLoad(String payLoad) {
this.payLoad = payLoad;
}
/**
* @param sdkVersion
* the sdkVersion to set
*/
public void setSdkVersion(SDKVersion sdkVersion) {
this.sdkVersion = sdkVersion;
}
/**
* Returns HTTP headers as a {@link Map}
*
* @return {@link Map} of Http headers
*/
public Map<String, String> getHeaderMap() {
return getProcessedHeaderMap();
}
public String getPayLoad() {
return getProcessedPayLoad();
}
public String getEndPoint() {
/*
* Process the EndPoint to append the resourcePath sent as a part of the
* method call with the base endPoint retrieved from configuration
* system
*/
String endPoint = null;
try {
endPoint = getBaseURL().toURI().resolve(resourcePath).toString();
} catch (MalformedURLException e) {
			// Ignore; a null endPoint is returned when the base URL is malformed
} catch (URISyntaxException e) {
			// Ignore; a null endPoint is returned when the resource path cannot be resolved
}
return endPoint;
}
public OAuthTokenCredential getCredential() {
return null;
}
public void validate() throws ClientActionRequiredException {
		// No client-side validation is performed for REST API calls
}
/**
* Returns the base URL configured in application resources or {@link Map}
* passed for dynamic configuration
*
* @return BaseUrl ending with a '/' character {@link URL}
* @throws MalformedURLException
* if endpoint cannot be found or formed
*/
public URL getBaseURL() throws MalformedURLException {
/*
		 * Resolve the base URL in this order: the 'service.EndPoint' property,
		 * then the 'mode' property (defaulting to the PayPal sandbox or live
		 * endpoint). Throw an exception if neither yields an endpoint
*/
if (url == null) {
String mode = this.configurationMap.get(Constants.MODE);
// Default to Endpoint param.
String urlString = this.configurationMap.get(Constants.ENDPOINT);
if (urlString == null || urlString.trim().isEmpty()) {
if (Constants.SANDBOX.equalsIgnoreCase(mode)) {
urlString = Constants.REST_SANDBOX_ENDPOINT;
} else if (Constants.LIVE.equalsIgnoreCase(mode)) {
urlString = Constants.REST_LIVE_ENDPOINT;
}
}
// If none of the option works, throw exception.
if (urlString == null || urlString.trim().length() <= 0) {
throw new MalformedURLException(
"service.EndPoint not set (OR) mode not configured to sandbox/live ");
}
if (!urlString.endsWith("/")) {
urlString += "/";
}
url = new URL(urlString);
}
return url;
}
/**
* @param urlString
* the url to set
*/
public void setUrl(String urlString) throws MalformedURLException {
if (urlString != null && urlString.length() > 0) {
String uString = urlString.endsWith("/") ? urlString : urlString
+ "/";
this.url = new URL(uString);
} else {
this.url = getBaseURL();
}
}
/**
* Returns User-Agent header
*
* @return {@link Map} storing the User-Agent header
*/
protected Map<String, String> formUserAgentHeader() {
UserAgentHeader userAgentHeader = new UserAgentHeader(
sdkVersion != null ? sdkVersion.getSDKId() : null,
sdkVersion != null ? sdkVersion.getSDKVersion() : null);
return userAgentHeader.getHeader();
}
/*
* Return Client ID from configuration Map
*/
private String getClientID() {
return this.configurationMap.get(Constants.CLIENT_ID);
}
/*
* Returns Client Secret from configuration Map
*/
private String getClientSecret() {
return this.configurationMap.get(Constants.CLIENT_SECRET);
}
/*
* Encodes Client ID and Client Secret in Base 64
*/
private String encodeToBase64(String clientID, String clientSecret)
throws UnsupportedEncodingException {
String base64ClientID = generateBase64String(clientID + ":"
+ clientSecret);
return base64ClientID;
}
/*
* Generate a Base64 encoded String from clientID & clientSecret
*/
private String generateBase64String(String clientID)
throws UnsupportedEncodingException {
String base64ClientID = null;
byte[] encoded = null;
encoded = Base64.encodeBase64(clientID.getBytes("UTF-8"));
base64ClientID = new String(encoded, "UTF-8");
return base64ClientID;
}
/**
* Override this method to return a {@link Map} of HTTP headers
*
* @return {@link Map} of HTTP headers
*/
protected Map<String, String> getProcessedHeaderMap() {
/*
* The implementation is PayPal specific. The Authorization header is
* formed for OAuth or Basic, for OAuth system the authorization token
* passed as a parameter is used in creation of HTTP header, for Basic
* Authorization the ClientID and ClientSecret passed as parameters are
* used after a Base64 encoding.
*/
Map<String, String> headers = new HashMap<String, String>();
// Add any custom headers
if (headersMap != null && headersMap.size() > 0) {
headers.putAll(headersMap);
}
if (authorizationToken != null
&& authorizationToken.trim().length() > 0) {
headers.put(Constants.AUTHORIZATION_HEADER, authorizationToken);
} else if (getClientID() != null && getClientID().trim().length() > 0
&& getClientSecret() != null
&& getClientSecret().trim().length() > 0) {
try {
headers.put(Constants.AUTHORIZATION_HEADER, "Basic "
+ encodeToBase64(getClientID(), getClientSecret()));
} catch (UnsupportedEncodingException e) {
				// UTF-8 is always supported, so this should not occur; the Authorization header is skipped if it does
}
}
/*
* Appends request Id which is used by PayPal API service for
* Idempotency
*/
if (requestId != null && requestId.length() > 0) {
headers.put(Constants.PAYPAL_REQUEST_ID_HEADER, requestId);
}
/*
* Add User-Agent header for tracking in PayPal system
*/
headers.putAll(formUserAgentHeader());
// Add application/json as the default Content-Type
		// backward compatibility for PayPal REST sdks which
		// do not add the Content-Type HTTP header in the sdk
		// stubs
if (!headers.containsKey(Constants.HTTP_CONTENT_TYPE_HEADER)) {
headers.put(Constants.HTTP_CONTENT_TYPE_HEADER,
Constants.HTTP_CONTENT_TYPE_JSON);
}
return headers;
}
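	/*
	 * Sketch of the headers produced above for an OAuth-authorized call. The
	 * header names shown are the conventional PayPal REST names assumed to be
	 * behind the Constants used here, and the values are placeholders:
	 *
	 *   RESTAPICallPreHandler handler = new RESTAPICallPreHandler(configurationMap);
	 *   handler.setAuthorizationToken("Bearer A21AA...");
	 *   handler.setRequestId("unique-idempotency-key");
	 *   handler.getHeaderMap();
	 *   // -> Authorization: Bearer A21AA...
	 *   //    PayPal-Request-Id: unique-idempotency-key
	 *   //    User-Agent: <formed by UserAgentHeader>
	 *   //    Content-Type: application/json
	 */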
/**
* Override this method to process payload for processing
*
* @return PayLoad as String
*/
protected String getProcessedPayLoad() {
/*
* Since the REST API of PayPal depends on json, which is well formed,
* no additional processing is required.
*/
return payLoad;
}
/**
* Return configurationMap
*
* @return configurationMap in this call pre-handler
*/
public Map<String, String> getConfigurationMap() {
return this.configurationMap;
}
}
| 3,930 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/test/java/org/apache/flink
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/** Architecture tests for test code. */
@AnalyzeClasses(
packages = "org.apache.flink.connector.hbase.util",
importOptions = {
ImportOption.OnlyIncludeTests.class,
ImportOptions.ExcludeScalaImportOption.class,
ImportOptions.ExcludeShadedImportOption.class
})
public class TestCodeArchitectureTest {
@ArchTest
public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
}
| 3,931 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/util/HBaseSerdeTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.util;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.DataType;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.List;
import static org.apache.flink.table.api.DataTypes.BIGINT;
import static org.apache.flink.table.api.DataTypes.DOUBLE;
import static org.apache.flink.table.api.DataTypes.FIELD;
import static org.apache.flink.table.api.DataTypes.INT;
import static org.apache.flink.table.api.DataTypes.ROW;
import static org.apache.flink.table.api.DataTypes.STRING;
import static org.assertj.core.api.Assertions.assertThat;
/** Test for {@link HBaseSerde}. */
class HBaseSerdeTest {
private static final String ROW_KEY = "rowkey";
private static final String FAMILY1 = "family1";
private static final String F1COL1 = "col1";
private static final String FAMILY2 = "family2";
private static final String F2COL1 = "col1";
private static final String F2COL2 = "col2";
private static final String FAMILY3 = "family3";
private static final String F3COL1 = "col1";
private static final String F3COL2 = "col2";
private static final String F3COL3 = "col3";
@Test
void convertToNewRowTest() {
HBaseSerde serde = createHBaseSerde(false);
List<List<Cell>> cellsList = prepareCells();
List<RowData> resultRowDatas = new ArrayList<>();
List<String> resultRowDataStr = new ArrayList<>();
for (List<Cell> cells : cellsList) {
RowData row = serde.convertToNewRow(Result.create(cells));
resultRowDatas.add(row);
resultRowDataStr.add(row.toString());
}
assertThat(resultRowDatas.get(0))
.as("RowData should not be reused")
.isNotSameAs(resultRowDatas.get(1));
assertThat(resultRowDataStr)
.containsExactly(
"+I(1,+I(10),+I(Hello-1,100),+I(1.01,false,Welt-1))",
"+I(2,+I(20),+I(Hello-2,200),+I(2.02,true,Welt-2))");
}
@Test
void convertToReusedRowTest() {
HBaseSerde serde = createHBaseSerde(false);
List<List<Cell>> cellsList = prepareCells();
List<RowData> resultRowDatas = new ArrayList<>();
List<String> resultRowDataStr = new ArrayList<>();
for (List<Cell> cells : cellsList) {
RowData row = serde.convertToReusedRow(Result.create(cells));
resultRowDatas.add(row);
resultRowDataStr.add(row.toString());
}
assertThat(resultRowDatas.get(0))
.as("RowData should be reused")
.isSameAs(resultRowDatas.get(1));
assertThat(resultRowDataStr)
.containsExactly(
"+I(1,+I(10),+I(Hello-1,100),+I(1.01,false,Welt-1))",
"+I(2,+I(20),+I(Hello-2,200),+I(2.02,true,Welt-2))");
}
@Test
public void writeIgnoreNullValueTest() {
HBaseSerde serde = createHBaseSerde(false);
Put m1 = serde.createPutMutation(prepareRowData(), HConstants.LATEST_TIMESTAMP);
        assertThat(m1).isNotNull();
assertThat(m1.getRow()).isNotEmpty();
assertThat(m1.get(FAMILY1.getBytes(), F1COL1.getBytes())).isNotEmpty();
assertThat(m1.get(FAMILY2.getBytes(), F2COL1.getBytes())).isNotEmpty();
assertThat(m1.get(FAMILY2.getBytes(), F2COL2.getBytes())).isNotEmpty();
assertThat(m1.get(FAMILY3.getBytes(), F3COL1.getBytes())).isNotEmpty();
assertThat(m1.get(FAMILY3.getBytes(), F3COL2.getBytes())).isNotEmpty();
assertThat(m1.get(FAMILY3.getBytes(), F3COL3.getBytes())).isNotEmpty();
HBaseSerde writeIgnoreNullValueSerde = createHBaseSerde(true);
Put m2 =
writeIgnoreNullValueSerde.createPutMutation(
prepareRowData(), HConstants.LATEST_TIMESTAMP);
        assertThat(m2).isNotNull();
assertThat(m2.getRow()).isNotEmpty();
assertThat(m2.get(FAMILY1.getBytes(), F1COL1.getBytes())).isEmpty();
assertThat(m2.get(FAMILY2.getBytes(), F2COL1.getBytes())).isNotEmpty();
assertThat(m2.get(FAMILY2.getBytes(), F2COL2.getBytes())).isEmpty();
        assertThat(m2.get(FAMILY3.getBytes(), F3COL1.getBytes())).isNotEmpty();
assertThat(m2.get(FAMILY3.getBytes(), F3COL2.getBytes())).isNotEmpty();
assertThat(m2.get(FAMILY3.getBytes(), F3COL3.getBytes())).isEmpty();
}
private HBaseTableSchema createHBaseTableSchema() {
DataType dataType =
ROW(
FIELD(ROW_KEY, INT()),
FIELD(FAMILY1, ROW(FIELD(F1COL1, INT()))),
FIELD(FAMILY2, ROW(FIELD(F2COL1, STRING()), FIELD(F2COL2, BIGINT()))),
FIELD(
FAMILY3,
ROW(
FIELD(F3COL1, DOUBLE()),
FIELD(F3COL2, DataTypes.BOOLEAN()),
FIELD(F3COL3, STRING()))));
return HBaseTableSchema.fromDataType(dataType);
}
private HBaseSerde createHBaseSerde(boolean writeIgnoreNullValue) {
return new HBaseSerde(createHBaseTableSchema(), "null", writeIgnoreNullValue);
}
private List<List<Cell>> prepareCells() {
List<List<Cell>> cellList = new ArrayList<>();
byte[] fam1 = Bytes.toBytes(FAMILY1);
byte[] f1c1 = Bytes.toBytes(F1COL1);
byte[] fam2 = Bytes.toBytes(FAMILY2);
byte[] f2c1 = Bytes.toBytes(F2COL1);
byte[] f2c2 = Bytes.toBytes(F2COL2);
byte[] fam3 = Bytes.toBytes(FAMILY3);
byte[] f3c1 = Bytes.toBytes(F3COL1);
byte[] f3c2 = Bytes.toBytes(F3COL2);
byte[] f3c3 = Bytes.toBytes(F3COL3);
byte[] row1 = Bytes.toBytes(1);
byte[] row2 = Bytes.toBytes(2);
Cell kv111 = new KeyValue(row1, fam1, f1c1, Bytes.toBytes(10));
Cell kv121 = new KeyValue(row1, fam2, f2c1, Bytes.toBytes("Hello-1"));
Cell kv122 = new KeyValue(row1, fam2, f2c2, Bytes.toBytes(100L));
Cell kv131 = new KeyValue(row1, fam3, f3c1, Bytes.toBytes(1.01));
Cell kv132 = new KeyValue(row1, fam3, f3c2, Bytes.toBytes(false));
Cell kv133 = new KeyValue(row1, fam3, f3c3, Bytes.toBytes("Welt-1"));
Cell kv211 = new KeyValue(row2, fam1, f1c1, Bytes.toBytes(20));
Cell kv221 = new KeyValue(row2, fam2, f2c1, Bytes.toBytes("Hello-2"));
Cell kv222 = new KeyValue(row2, fam2, f2c2, Bytes.toBytes(200L));
Cell kv231 = new KeyValue(row2, fam3, f3c1, Bytes.toBytes(2.02));
Cell kv232 = new KeyValue(row2, fam3, f3c2, Bytes.toBytes(true));
Cell kv233 = new KeyValue(row2, fam3, f3c3, Bytes.toBytes("Welt-2"));
List<Cell> cells1 = new ArrayList<>();
cells1.add(kv111);
cells1.add(kv121);
cells1.add(kv122);
cells1.add(kv131);
cells1.add(kv132);
cells1.add(kv133);
List<Cell> cells2 = new ArrayList<>();
cells2.add(kv211);
cells2.add(kv221);
cells2.add(kv222);
cells2.add(kv231);
cells2.add(kv232);
cells2.add(kv233);
cellList.add(cells1);
cellList.add(cells2);
return cellList;
}
private RowData prepareRowData() {
GenericRowData fam1Row = new GenericRowData(1);
fam1Row.setField(0, null);
GenericRowData fam2Row = new GenericRowData(2);
fam2Row.setField(0, StringData.fromString("Hello-1"));
fam2Row.setField(1, null);
GenericRowData fam3Row = new GenericRowData(3);
fam3Row.setField(0, 2.02);
fam3Row.setField(1, true);
fam3Row.setField(2, null);
GenericRowData row = new GenericRowData(4);
row.setField(0, 10);
row.setField(1, fam1Row);
row.setField(2, fam2Row);
row.setField(3, fam3Row);
return row;
}
}
| 3,932 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/util/HBaseConfigLoadingTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.util;
import org.apache.flink.core.testutils.CommonTestUtils;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests that validate the loading of the HBase configuration, relative to entries in the Flink
* configuration and the environment variables.
*/
class HBaseConfigLoadingTest {
private static final String IN_HBASE_CONFIG_KEY = "hbase_conf_key";
private static final String IN_HBASE_CONFIG_VALUE = "hbase_conf_value!";
@TempDir Path tmpDir;
@Test
void loadFromClasspathByDefault() {
org.apache.hadoop.conf.Configuration hbaseConf =
HBaseConfigurationUtil.getHBaseConfiguration();
assertThat(hbaseConf.get(IN_HBASE_CONFIG_KEY, null)).isEqualTo(IN_HBASE_CONFIG_VALUE);
}
@Test
void loadFromEnvVariables() throws Exception {
final String k1 = "where?";
final String v1 = "I'm on a boat";
final String k2 = "when?";
final String v2 = "midnight";
final String k3 = "why?";
final String v3 = "what do you think?";
final String k4 = "which way?";
final String v4 = "south, always south...";
final File hbaseConfDir = tmpDir.toFile();
final File hbaseHome = Files.createTempDirectory(tmpDir, "junit_hbaseHome_").toFile();
final File hbaseHomeConf = new File(hbaseHome, "conf");
assertThat(hbaseHomeConf.mkdirs()).isTrue();
final File file1 = new File(hbaseConfDir, "hbase-default.xml");
final File file2 = new File(hbaseConfDir, "hbase-site.xml");
final File file3 = new File(hbaseHomeConf, "hbase-default.xml");
final File file4 = new File(hbaseHomeConf, "hbase-site.xml");
printConfig(file1, k1, v1);
printConfig(file2, k2, v2);
printConfig(file3, k3, v3);
printConfig(file4, k4, v4);
final org.apache.hadoop.conf.Configuration hbaseConf;
final Map<String, String> originalEnv = System.getenv();
final Map<String, String> newEnv = new HashMap<>(originalEnv);
newEnv.put("HBASE_CONF_DIR", hbaseConfDir.getAbsolutePath());
newEnv.put("HBASE_HOME", hbaseHome.getAbsolutePath());
try {
CommonTestUtils.setEnv(newEnv);
hbaseConf = HBaseConfigurationUtil.getHBaseConfiguration();
} finally {
CommonTestUtils.setEnv(originalEnv);
}
// contains extra entries
assertThat(hbaseConf.get(k1, null)).isEqualTo(v1);
assertThat(hbaseConf.get(k2, null)).isEqualTo(v2);
assertThat(hbaseConf.get(k3, null)).isEqualTo(v3);
assertThat(hbaseConf.get(k4, null)).isEqualTo(v4);
// also contains classpath defaults
assertThat(hbaseConf.get(IN_HBASE_CONFIG_KEY, null)).isEqualTo(IN_HBASE_CONFIG_VALUE);
}
@Test
void loadOverlappingConfig() throws Exception {
final String k1 = "key1";
final String v1 = "from HBASE_HOME/conf";
final String v2 = "from HBASE_CONF_DIR";
final File hbaseHome = tmpDir.resolve("hbaseHome").toFile();
final File hbaseHomeConf = new File(hbaseHome, "conf");
final File hbaseConfDir = tmpDir.resolve("hbaseConfDir").toFile();
assertThat(hbaseHomeConf.mkdirs()).isTrue();
final File file1 = new File(hbaseHomeConf, "hbase-site.xml");
Map<String, String> properties1 = new HashMap<>();
properties1.put(k1, v1);
printConfigs(file1, properties1);
// HBASE_CONF_DIR conf will override k1 with v2
assertThat(hbaseConfDir.mkdirs()).isTrue();
final File file2 = new File(hbaseConfDir, "hbase-site.xml");
Map<String, String> properties2 = new HashMap<>();
properties2.put(k1, v2);
printConfigs(file2, properties2);
final org.apache.hadoop.conf.Configuration hbaseConf;
final Map<String, String> originalEnv = System.getenv();
final Map<String, String> newEnv = new HashMap<>(originalEnv);
newEnv.put("HBASE_CONF_DIR", hbaseConfDir.getAbsolutePath());
newEnv.put("HBASE_HOME", hbaseHome.getAbsolutePath());
try {
CommonTestUtils.setEnv(newEnv);
hbaseConf = HBaseConfigurationUtil.getHBaseConfiguration();
} finally {
CommonTestUtils.setEnv(originalEnv);
}
// contains extra entries
assertThat(hbaseConf.get(k1, null)).isEqualTo(v2);
// also contains classpath defaults
assertThat(hbaseConf.get(IN_HBASE_CONFIG_KEY, null)).isEqualTo(IN_HBASE_CONFIG_VALUE);
}
private static void printConfig(File file, String key, String value) throws IOException {
Map<String, String> map = new HashMap<>(1);
map.put(key, value);
printConfigs(file, map);
}
private static void printConfigs(File file, Map<String, String> properties) throws IOException {
try (PrintStream out = new PrintStream(new FileOutputStream(file))) {
out.println("<?xml version=\"1.0\"?>");
out.println("<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>");
out.println("<configuration>");
for (Map.Entry<String, String> entry : properties.entrySet()) {
out.println("\t<property>");
out.println("\t\t<name>" + entry.getKey() + "</name>");
out.println("\t\t<value>" + entry.getValue() + "</value>");
out.println("\t</property>");
}
out.println("</configuration>");
}
}
}
| 3,933 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/options/HBaseWriteOptions.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.options;
import org.apache.flink.annotation.Internal;
import org.apache.hadoop.hbase.client.ConnectionConfiguration;
import java.io.Serializable;
import java.util.Objects;
/** Options for HBase writing. */
@Internal
public class HBaseWriteOptions implements Serializable {
private static final long serialVersionUID = 1L;
private final long bufferFlushMaxSizeInBytes;
private final long bufferFlushMaxRows;
private final long bufferFlushIntervalMillis;
private final boolean ignoreNullValue;
private final Integer parallelism;
private HBaseWriteOptions(
long bufferFlushMaxSizeInBytes,
long bufferFlushMaxMutations,
long bufferFlushIntervalMillis,
boolean ignoreNullValue,
Integer parallelism) {
this.bufferFlushMaxSizeInBytes = bufferFlushMaxSizeInBytes;
this.bufferFlushMaxRows = bufferFlushMaxMutations;
this.bufferFlushIntervalMillis = bufferFlushIntervalMillis;
this.ignoreNullValue = ignoreNullValue;
this.parallelism = parallelism;
}
public long getBufferFlushMaxSizeInBytes() {
return bufferFlushMaxSizeInBytes;
}
public long getBufferFlushMaxRows() {
return bufferFlushMaxRows;
}
public long getBufferFlushIntervalMillis() {
return bufferFlushIntervalMillis;
}
public boolean isIgnoreNullValue() {
return ignoreNullValue;
}
public Integer getParallelism() {
return parallelism;
}
@Override
public String toString() {
return "HBaseWriteOptions{"
+ "bufferFlushMaxSizeInBytes="
+ bufferFlushMaxSizeInBytes
+ ", bufferFlushMaxRows="
+ bufferFlushMaxRows
+ ", bufferFlushIntervalMillis="
+ bufferFlushIntervalMillis
+ ", ignoreNullValue="
+ ignoreNullValue
+ ", parallelism="
+ parallelism
+ '}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
HBaseWriteOptions that = (HBaseWriteOptions) o;
return bufferFlushMaxSizeInBytes == that.bufferFlushMaxSizeInBytes
&& bufferFlushMaxRows == that.bufferFlushMaxRows
&& bufferFlushIntervalMillis == that.bufferFlushIntervalMillis
&& ignoreNullValue == that.ignoreNullValue
                && Objects.equals(parallelism, that.parallelism);
}
@Override
public int hashCode() {
return Objects.hash(
bufferFlushMaxSizeInBytes,
bufferFlushMaxRows,
                bufferFlushIntervalMillis,
                ignoreNullValue,
                parallelism);
}
/** Creates a builder for {@link HBaseWriteOptions}. */
public static Builder builder() {
return new Builder();
}
/** Builder for {@link HBaseWriteOptions}. */
public static class Builder {
private long bufferFlushMaxSizeInBytes = ConnectionConfiguration.WRITE_BUFFER_SIZE_DEFAULT;
private long bufferFlushMaxRows = 0;
private long bufferFlushIntervalMillis = 0;
private boolean ignoreNullValue;
private Integer parallelism;
/**
* Optional. Sets when to flush a buffered request based on the memory size of rows
         * currently added. Defaults to <code>2mb</code>.
*/
public Builder setBufferFlushMaxSizeInBytes(long bufferFlushMaxSizeInBytes) {
this.bufferFlushMaxSizeInBytes = bufferFlushMaxSizeInBytes;
return this;
}
/**
         * Optional. Sets when to flush a buffered request based on the number of rows currently
* added. Defaults to not set, i.e. won't flush based on the number of buffered rows.
*/
public Builder setBufferFlushMaxRows(long bufferFlushMaxRows) {
this.bufferFlushMaxRows = bufferFlushMaxRows;
return this;
}
/**
         * Optional. Sets the interval, in milliseconds, at which buffered requests are flushed
         * if the interval passes. Defaults to not set, i.e. won't flush based on flush interval.
*/
public Builder setBufferFlushIntervalMillis(long bufferFlushIntervalMillis) {
this.bufferFlushIntervalMillis = bufferFlushIntervalMillis;
return this;
}
/**
         * Optional. Sets whether to ignore null values or not. By default, null values are written.
*/
public Builder setIgnoreNullValue(boolean ignoreNullValue) {
this.ignoreNullValue = ignoreNullValue;
return this;
}
/**
* Optional. Defines the parallelism of the HBase sink operator. By default, the parallelism
         * is determined by the framework using the same parallelism as the upstream chained
* operator.
*/
public Builder setParallelism(Integer parallelism) {
this.parallelism = parallelism;
return this;
}
/** Creates a new instance of {@link HBaseWriteOptions}. */
public HBaseWriteOptions build() {
return new HBaseWriteOptions(
bufferFlushMaxSizeInBytes,
bufferFlushMaxRows,
bufferFlushIntervalMillis,
ignoreNullValue,
parallelism);
}
}
}
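/*
 * A minimal builder sketch for the options above (the thresholds are
 * arbitrary example values):
 *
 *   HBaseWriteOptions writeOptions =
 *           HBaseWriteOptions.builder()
 *                   .setBufferFlushMaxRows(1000)
 *                   .setBufferFlushIntervalMillis(1000)
 *                   .setIgnoreNullValue(true)
 *                   .build();
 */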
| 3,934 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseTableSchema.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.util;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.TypeExtractor;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.utils.TypeConversions;
import org.apache.flink.util.Preconditions;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import static org.apache.flink.table.types.utils.TypeConversions.fromLogicalToDataType;
/** Helps to specify an HBase Table's schema. */
@Internal
public class HBaseTableSchema implements Serializable {
private static final long serialVersionUID = 1L;
// A Map with key as column family.
private final Map<String, Map<String, DataType>> familyMap = new LinkedHashMap<>();
// information about rowkey
private RowKeyInfo rowKeyInfo;
// charset to parse HBase keys and strings. UTF-8 by default.
private String charset = "UTF-8";
/**
* Adds a column defined by family, qualifier, and type to the table schema.
*
* @param family the family name
* @param qualifier the qualifier name
* @param clazz the data type of the qualifier
*/
public void addColumn(String family, String qualifier, Class<?> clazz) {
Preconditions.checkNotNull(clazz, "class type");
DataType type = TypeConversions.fromLegacyInfoToDataType(TypeExtractor.getForClass(clazz));
addColumn(family, qualifier, type);
}
public void addColumn(String family, String qualifier, DataType type) {
Preconditions.checkNotNull(family, "family name");
Preconditions.checkNotNull(qualifier, "qualifier name");
Preconditions.checkNotNull(type, "data type");
Map<String, DataType> qualifierMap = this.familyMap.get(family);
if (!HBaseTypeUtils.isSupportedType(type.getLogicalType())) {
// throw exception
throw new IllegalArgumentException(
"Unsupported class type found "
+ type
+ ". "
+ "Better to use byte[].class and deserialize using user defined scalar functions");
}
if (qualifierMap == null) {
qualifierMap = new LinkedHashMap<>();
}
qualifierMap.put(qualifier, type);
familyMap.put(family, qualifierMap);
}
/**
* Sets row key information in the table schema.
*
* @param rowKeyName the row key field name
* @param clazz the data type of the row key
*/
public void setRowKey(String rowKeyName, Class<?> clazz) {
Preconditions.checkNotNull(clazz, "row key class type");
DataType type = TypeConversions.fromLegacyInfoToDataType(TypeExtractor.getForClass(clazz));
setRowKey(rowKeyName, type);
}
public void setRowKey(String rowKeyName, DataType type) {
Preconditions.checkNotNull(rowKeyName, "row key field name");
Preconditions.checkNotNull(type, "row key data type");
if (!HBaseTypeUtils.isSupportedType(type.getLogicalType())) {
// throw exception
throw new IllegalArgumentException(
"Unsupported class type found "
+ type
+ ". "
+ "Better to use byte[].class and deserialize using user defined scalar functions");
}
if (rowKeyInfo != null) {
throw new IllegalArgumentException("Row key can't be set multiple times.");
}
this.rowKeyInfo = new RowKeyInfo(rowKeyName, type, familyMap.size());
}
/**
* Sets the charset for value strings and HBase identifiers.
*
* @param charset the charset for value strings and HBase identifiers.
*/
public void setCharset(String charset) {
this.charset = charset;
}
/**
* Returns the names of all registered column families.
*
* @return The names of all registered column families.
*/
public String[] getFamilyNames() {
return this.familyMap.keySet().toArray(new String[0]);
}
/**
* Returns the HBase identifiers of all registered column families.
*
* @return The HBase identifiers of all registered column families.
*/
public byte[][] getFamilyKeys() {
Charset c = Charset.forName(charset);
byte[][] familyKeys = new byte[this.familyMap.size()][];
int i = 0;
for (String name : this.familyMap.keySet()) {
familyKeys[i++] = name.getBytes(c);
}
return familyKeys;
}
/**
* Returns the names of all registered column qualifiers of a specific column family.
*
* @param family The name of the column family for which the column qualifier names are
* returned.
* @return The names of all registered column qualifiers of a specific column family.
*/
public String[] getQualifierNames(String family) {
Map<String, DataType> qualifierMap = familyMap.get(family);
if (qualifierMap == null) {
throw new IllegalArgumentException("Family " + family + " does not exist in schema.");
}
String[] qualifierNames = new String[qualifierMap.size()];
int i = 0;
for (String qualifier : qualifierMap.keySet()) {
qualifierNames[i] = qualifier;
i++;
}
return qualifierNames;
}
/**
* Returns the HBase identifiers of all registered column qualifiers for a specific column
* family.
*
* @param family The name of the column family for which the column qualifier identifiers are
* returned.
* @return The HBase identifiers of all registered column qualifiers for a specific column
* family.
*/
public byte[][] getQualifierKeys(String family) {
Map<String, DataType> qualifierMap = familyMap.get(family);
if (qualifierMap == null) {
throw new IllegalArgumentException("Family " + family + " does not exist in schema.");
}
Charset c = Charset.forName(charset);
byte[][] qualifierKeys = new byte[qualifierMap.size()][];
int i = 0;
for (String name : qualifierMap.keySet()) {
qualifierKeys[i++] = name.getBytes(c);
}
return qualifierKeys;
}
public DataType[] getQualifierDataTypes(String family) {
Map<String, DataType> qualifierMap = familyMap.get(family);
if (qualifierMap == null) {
throw new IllegalArgumentException("Family " + family + " does not exist in schema.");
}
DataType[] dataTypes = new DataType[qualifierMap.size()];
int i = 0;
for (DataType dataType : qualifierMap.values()) {
dataTypes[i] = dataType;
i++;
}
return dataTypes;
}
/**
* Returns the names and types of all registered column qualifiers of a specific column family.
*
* @param family The name of the column family for which the column qualifier names and types
* are returned.
* @return The names and types of all registered column qualifiers of a specific column family.
*/
private Map<String, DataType> getFamilyInfo(String family) {
return familyMap.get(family);
}
/** Returns field index of row key in the table schema. Returns -1 if row key is not set. */
public int getRowKeyIndex() {
return rowKeyInfo == null ? -1 : rowKeyInfo.rowKeyIndex;
}
/** Returns the optional type information of row key. Returns null if row key is not set. */
public Optional<TypeInformation<?>> getRowKeyTypeInfo() {
return rowKeyInfo == null
? Optional.empty()
: Optional.of(TypeConversions.fromDataTypeToLegacyInfo(rowKeyInfo.rowKeyType));
}
public Optional<DataType> getRowKeyDataType() {
return rowKeyInfo == null ? Optional.empty() : Optional.of(rowKeyInfo.rowKeyType);
}
/**
* Returns the optional value of the row key name. The row key name is the field name in the
* HBase schema which can be queried in Flink SQL.
*/
public Optional<String> getRowKeyName() {
return rowKeyInfo == null ? Optional.empty() : Optional.of(rowKeyInfo.rowKeyName);
}
/**
* Converts this {@link HBaseTableSchema} to a {@link DataType}. The fields consist of the
* families and the rowkey, in definition order (i.e. the order of calls to {@link
* #addColumn(String, String, Class)} and {@link #setRowKey(String, Class)}). Each family field
* is a composite type composed of its qualifiers.
*
* @return the {@link DataType} derived from the {@link HBaseTableSchema}.
*/
public DataType convertToDataType() {
String[] familyNames = getFamilyNames();
if (rowKeyInfo != null) {
String[] fieldNames = new String[familyNames.length + 1];
DataType[] fieldTypes = new DataType[familyNames.length + 1];
for (int i = 0; i < fieldNames.length; i++) {
if (i == rowKeyInfo.rowKeyIndex) {
fieldNames[i] = rowKeyInfo.rowKeyName;
fieldTypes[i] = rowKeyInfo.rowKeyType;
} else {
int familyIndex = i < rowKeyInfo.rowKeyIndex ? i : i - 1;
String family = familyNames[familyIndex];
fieldNames[i] = family;
fieldTypes[i] =
getRowDataType(
getQualifierNames(family), getQualifierDataTypes(family));
}
}
DataTypes.Field[] fields = new DataTypes.Field[fieldNames.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = DataTypes.FIELD(fieldNames[i], fieldTypes[i]);
}
return DataTypes.ROW(fields);
} else {
String[] fieldNames = new String[familyNames.length];
DataType[] fieldTypes = new DataType[familyNames.length];
for (int i = 0; i < fieldNames.length; i++) {
String family = familyNames[i];
fieldNames[i] = family;
fieldTypes[i] =
getRowDataType(getQualifierNames(family), getQualifierDataTypes(family));
}
DataTypes.Field[] fields = new DataTypes.Field[fieldNames.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = DataTypes.FIELD(fieldNames[i], fieldTypes[i]);
}
return DataTypes.ROW(fields);
}
}
/**
* Returns row data type with given field names {@code fieldNames} and data types {@code
* fieldTypes}.
*
* @param fieldNames the field names
* @param fieldTypes the field types
* @return nullable row type
*/
private static DataType getRowDataType(String[] fieldNames, DataType[] fieldTypes) {
final DataTypes.Field[] fields = new DataTypes.Field[fieldNames.length];
for (int j = 0; j < fieldNames.length; j++) {
fields[j] = DataTypes.FIELD(fieldNames[j], fieldTypes[j]);
}
return DataTypes.ROW(fields);
}
/** Construct a {@link HBaseTableSchema} from a {@link DataType}. */
public static HBaseTableSchema fromDataType(DataType physicalRowType) {
HBaseTableSchema hbaseSchema = new HBaseTableSchema();
RowType rowType = (RowType) physicalRowType.getLogicalType();
for (RowType.RowField field : rowType.getFields()) {
LogicalType fieldType = field.getType();
if (fieldType.getTypeRoot() == LogicalTypeRoot.ROW) {
RowType familyType = (RowType) fieldType;
String familyName = field.getName();
for (RowType.RowField qualifier : familyType.getFields()) {
hbaseSchema.addColumn(
familyName,
qualifier.getName(),
fromLogicalToDataType(qualifier.getType()));
}
} else if (fieldType.getChildren().size() == 0) {
hbaseSchema.setRowKey(field.getName(), fromLogicalToDataType(fieldType));
} else {
throw new IllegalArgumentException(
"Unsupported field type '" + fieldType + "' for HBase.");
}
}
return hbaseSchema;
}
// ------------------------------------------------------------------------------------
/** A class containing information about rowKey, such as rowKeyName, rowKeyType, rowKeyIndex. */
private static class RowKeyInfo implements Serializable {
private static final long serialVersionUID = 1L;
final String rowKeyName;
final DataType rowKeyType;
final int rowKeyIndex;
RowKeyInfo(String rowKeyName, DataType rowKeyType, int rowKeyIndex) {
this.rowKeyName = rowKeyName;
this.rowKeyType = rowKeyType;
this.rowKeyIndex = rowKeyIndex;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RowKeyInfo)) {
return false;
}
RowKeyInfo that = (RowKeyInfo) o;
return Objects.equals(rowKeyName, that.rowKeyName)
&& Objects.equals(rowKeyType, that.rowKeyType)
&& Objects.equals(rowKeyIndex, that.rowKeyIndex);
}
@Override
public int hashCode() {
return Objects.hash(rowKeyName, rowKeyType, rowKeyIndex);
}
}
@Override
public boolean equals(Object o) {
if (!(o instanceof HBaseTableSchema)) {
return false;
}
HBaseTableSchema that = (HBaseTableSchema) o;
return Objects.equals(familyMap, that.familyMap)
&& Objects.equals(rowKeyInfo, that.rowKeyInfo)
&& Objects.equals(charset, that.charset);
}
@Override
public int hashCode() {
return Objects.hash(familyMap, rowKeyInfo, charset);
}
}
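// ---------------------------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file): builds a schema with an
// assumed row key and one column family, then derives the corresponding Flink DataType. The
// "rowkey", "cf", "name" and "age" identifiers are assumptions for the example.
class HBaseTableSchemaUsageExample {
    static DataType exampleDataType() {
        HBaseTableSchema schema = new HBaseTableSchema();
        schema.setRowKey("rowkey", String.class);
        schema.addColumn("cf", "name", String.class);
        schema.addColumn("cf", "age", Integer.class);
        // Roughly ROW<rowkey STRING, cf ROW<name STRING, age INT>> in definition order.
        return schema.convertToDataType();
    }
}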
| 3,935 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseSerde.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.util;
import org.apache.flink.table.data.DecimalData;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeFamily;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import javax.annotation.Nullable;
import java.io.Serializable;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import static org.apache.flink.table.types.logical.utils.LogicalTypeChecks.getPrecision;
import static org.apache.flink.util.Preconditions.checkArgument;
/** Utilities for HBase serialization and deserialization. */
public class HBaseSerde {
private static final byte[] EMPTY_BYTES = new byte[] {};
private static final int MIN_TIMESTAMP_PRECISION = 0;
private static final int MAX_TIMESTAMP_PRECISION = 3;
private static final int MIN_TIME_PRECISION = 0;
private static final int MAX_TIME_PRECISION = 3;
private final byte[] nullStringBytes;
private final boolean writeIgnoreNullValue;
// row key index in output row
private final int rowkeyIndex;
// family keys
private final byte[][] families;
// qualifier keys
private final byte[][][] qualifiers;
private final int fieldLength;
private GenericRowData reusedRow;
private GenericRowData[] reusedFamilyRows;
private final @Nullable FieldEncoder keyEncoder;
private final @Nullable FieldDecoder keyDecoder;
private final FieldEncoder[][] qualifierEncoders;
private final FieldDecoder[][] qualifierDecoders;
private final GenericRowData rowWithRowKey;
public HBaseSerde(HBaseTableSchema hbaseSchema, final String nullStringLiteral) {
this(hbaseSchema, nullStringLiteral, false);
}
public HBaseSerde(
HBaseTableSchema hbaseSchema,
final String nullStringLiteral,
boolean writeIgnoreNullValue) {
this.families = hbaseSchema.getFamilyKeys();
this.rowkeyIndex = hbaseSchema.getRowKeyIndex();
LogicalType rowkeyType =
hbaseSchema.getRowKeyDataType().map(DataType::getLogicalType).orElse(null);
// the field length needs to take the row key into account if it exists.
if (rowkeyIndex != -1 && rowkeyType != null) {
this.fieldLength = families.length + 1;
this.keyEncoder = createFieldEncoder(rowkeyType);
this.keyDecoder = createFieldDecoder(rowkeyType);
} else {
this.fieldLength = families.length;
this.keyEncoder = null;
this.keyDecoder = null;
}
this.nullStringBytes = nullStringLiteral.getBytes(StandardCharsets.UTF_8);
this.writeIgnoreNullValue = writeIgnoreNullValue;
// prepare output rows
this.reusedRow = new GenericRowData(fieldLength);
this.reusedFamilyRows = new GenericRowData[families.length];
this.qualifiers = new byte[families.length][][];
this.qualifierEncoders = new FieldEncoder[families.length][];
this.qualifierDecoders = new FieldDecoder[families.length][];
String[] familyNames = hbaseSchema.getFamilyNames();
for (int f = 0; f < families.length; f++) {
this.qualifiers[f] = hbaseSchema.getQualifierKeys(familyNames[f]);
DataType[] dataTypes = hbaseSchema.getQualifierDataTypes(familyNames[f]);
this.qualifierEncoders[f] =
Arrays.stream(dataTypes)
.map(DataType::getLogicalType)
.map(t -> createNullableFieldEncoder(t, nullStringBytes))
.toArray(FieldEncoder[]::new);
this.qualifierDecoders[f] =
Arrays.stream(dataTypes)
.map(DataType::getLogicalType)
.map(t -> createNullableFieldDecoder(t, nullStringBytes))
.toArray(FieldDecoder[]::new);
this.reusedFamilyRows[f] = new GenericRowData(dataTypes.length);
}
this.rowWithRowKey = new GenericRowData(1);
}
/**
* Returns an instance of Put that writes a record to the HBase table.
*
* @return The appropriate instance of Put for this use case.
*/
public @Nullable Put createPutMutation(RowData row, long timestamp) {
checkArgument(keyEncoder != null, "row key is not set.");
byte[] rowkey = keyEncoder.encode(row, rowkeyIndex);
if (rowkey.length == 0) {
// drop dirty records, rowkey shouldn't be zero length
return null;
}
// upsert
Put put = new Put(rowkey, timestamp);
for (int i = 0; i < fieldLength; i++) {
if (i != rowkeyIndex) {
int f = i > rowkeyIndex ? i - 1 : i;
// get family key
byte[] familyKey = families[f];
RowData familyRow = row.getRow(i, qualifiers[f].length);
for (int q = 0; q < this.qualifiers[f].length; q++) {
// ignore null value or not
if (writeIgnoreNullValue && familyRow.isNullAt(q)) {
continue;
}
// get qualifier key
byte[] qualifier = qualifiers[f][q];
// serialize value
byte[] value = qualifierEncoders[f][q].encode(familyRow, q);
put.addColumn(familyKey, qualifier, value);
}
}
}
return put;
}
/**
* Returns an instance of Delete that removes a record from the HBase table.
*
* @return The appropriate instance of Delete for this use case.
*/
public @Nullable Delete createDeleteMutation(RowData row, long timestamp) {
checkArgument(keyEncoder != null, "row key is not set.");
byte[] rowkey = keyEncoder.encode(row, rowkeyIndex);
if (rowkey.length == 0) {
// drop dirty records, rowkey shouldn't be zero length
return null;
}
// delete
Delete delete = new Delete(rowkey, timestamp);
for (int i = 0; i < fieldLength; i++) {
if (i != rowkeyIndex) {
int f = i > rowkeyIndex ? i - 1 : i;
// get family key
byte[] familyKey = families[f];
for (int q = 0; q < this.qualifiers[f].length; q++) {
// get qualifier key
byte[] qualifier = qualifiers[f][q];
delete.addColumn(familyKey, qualifier);
}
}
}
return delete;
}
/**
* Returns an instance of Scan that retrieves the required subset of records from the HBase
* table.
*
* @return The appropriate instance of Scan for this use case.
*/
public Scan createScan() {
Scan scan = new Scan();
for (int f = 0; f < families.length; f++) {
byte[] family = families[f];
for (int q = 0; q < qualifiers[f].length; q++) {
byte[] qualifier = qualifiers[f][q];
scan.addColumn(family, qualifier);
}
}
return scan;
}
/**
* Returns an instance of Get that retrieves the matching record from the HBase table.
*
* @return The appropriate instance of Get for this use case.
*/
public Get createGet(Object rowKey) {
checkArgument(keyEncoder != null, "row key is not set.");
rowWithRowKey.setField(0, rowKey);
byte[] rowkey = keyEncoder.encode(rowWithRowKey, 0);
if (rowkey.length == 0) {
// drop dirty records, rowkey shouldn't be zero length
return null;
}
Get get = new Get(rowkey);
for (int f = 0; f < families.length; f++) {
byte[] family = families[f];
for (byte[] qualifier : qualifiers[f]) {
get.addColumn(family, qualifier);
}
}
return get;
}
/**
* Converts HBase {@link Result} into a new {@link RowData} instance.
*
* <p>Note: this method is thread-safe.
*/
public RowData convertToNewRow(Result result) {
// The output rows need to be initialized each time
// to prevent the possibility of putting the output object into the cache.
GenericRowData resultRow = new GenericRowData(fieldLength);
GenericRowData[] familyRows = new GenericRowData[families.length];
for (int f = 0; f < families.length; f++) {
familyRows[f] = new GenericRowData(qualifiers[f].length);
}
return convertToRow(result, resultRow, familyRows);
}
/**
* Converts HBase {@link Result} into a reused {@link RowData} instance.
*
* <p>Note: this method is NOT thread-safe.
*/
public RowData convertToReusedRow(Result result) {
return convertToRow(result, reusedRow, reusedFamilyRows);
}
private RowData convertToRow(
Result result, GenericRowData resultRow, GenericRowData[] familyRows) {
for (int i = 0; i < fieldLength; i++) {
if (rowkeyIndex == i) {
assert keyDecoder != null;
Object rowkey = keyDecoder.decode(result.getRow());
resultRow.setField(rowkeyIndex, rowkey);
} else {
int f = (rowkeyIndex != -1 && i > rowkeyIndex) ? i - 1 : i;
// get family key
byte[] familyKey = families[f];
GenericRowData familyRow = familyRows[f];
for (int q = 0; q < this.qualifiers[f].length; q++) {
// get qualifier key
byte[] qualifier = qualifiers[f][q];
// read value
byte[] value = result.getValue(familyKey, qualifier);
familyRow.setField(q, qualifierDecoders[f][q].decode(value));
}
resultRow.setField(i, familyRow);
}
}
return resultRow;
}
/**
* Converts HBase {@link Result} into {@link RowData}.
*
* @deprecated Use {@link #convertToReusedRow(Result)} instead.
*/
@Deprecated
public RowData convertToRow(Result result) {
for (int i = 0; i < fieldLength; i++) {
if (rowkeyIndex == i) {
assert keyDecoder != null;
Object rowkey = keyDecoder.decode(result.getRow());
reusedRow.setField(rowkeyIndex, rowkey);
} else {
int f = (rowkeyIndex != -1 && i > rowkeyIndex) ? i - 1 : i;
// get family key
byte[] familyKey = families[f];
GenericRowData familyRow = reusedFamilyRows[f];
for (int q = 0; q < this.qualifiers[f].length; q++) {
// get qualifier key
byte[] qualifier = qualifiers[f][q];
// read value
byte[] value = result.getValue(familyKey, qualifier);
familyRow.setField(q, qualifierDecoders[f][q].decode(value));
}
reusedRow.setField(i, familyRow);
}
}
return reusedRow;
}
// ------------------------------------------------------------------------------------
// HBase Runtime Encoders
// ------------------------------------------------------------------------------------
/** Runtime encoder that encodes a specified field in {@link RowData} into byte[]. */
@FunctionalInterface
private interface FieldEncoder extends Serializable {
byte[] encode(RowData row, int pos);
}
private static FieldEncoder createNullableFieldEncoder(
LogicalType fieldType, final byte[] nullStringBytes) {
final FieldEncoder encoder = createFieldEncoder(fieldType);
if (fieldType.isNullable()) {
if (fieldType.is(LogicalTypeFamily.CHARACTER_STRING)) {
// special logic for null string values, because HBase can store empty bytes for
// string
return (row, pos) -> {
if (row.isNullAt(pos)) {
return nullStringBytes;
} else {
return encoder.encode(row, pos);
}
};
} else {
// encode empty bytes for null values
return (row, pos) -> {
if (row.isNullAt(pos)) {
return EMPTY_BYTES;
} else {
return encoder.encode(row, pos);
}
};
}
} else {
return encoder;
}
}
private static FieldEncoder createFieldEncoder(LogicalType fieldType) {
// ordered by type root definition
switch (fieldType.getTypeRoot()) {
case CHAR:
case VARCHAR:
// get the underlying UTF-8 bytes
return (row, pos) -> row.getString(pos).toBytes();
case BOOLEAN:
return (row, pos) -> Bytes.toBytes(row.getBoolean(pos));
case BINARY:
case VARBINARY:
return RowData::getBinary;
case DECIMAL:
return createDecimalEncoder((DecimalType) fieldType);
case TINYINT:
return (row, pos) -> new byte[] {row.getByte(pos)};
case SMALLINT:
return (row, pos) -> Bytes.toBytes(row.getShort(pos));
case INTEGER:
case DATE:
case INTERVAL_YEAR_MONTH:
return (row, pos) -> Bytes.toBytes(row.getInt(pos));
case TIME_WITHOUT_TIME_ZONE:
final int timePrecision = getPrecision(fieldType);
if (timePrecision < MIN_TIME_PRECISION || timePrecision > MAX_TIME_PRECISION) {
throw new UnsupportedOperationException(
String.format(
"The precision %s of TIME type is out of the range [%s, %s] supported by "
+ "HBase connector",
timePrecision, MIN_TIME_PRECISION, MAX_TIME_PRECISION));
}
return (row, pos) -> Bytes.toBytes(row.getInt(pos));
case BIGINT:
case INTERVAL_DAY_TIME:
return (row, pos) -> Bytes.toBytes(row.getLong(pos));
case FLOAT:
return (row, pos) -> Bytes.toBytes(row.getFloat(pos));
case DOUBLE:
return (row, pos) -> Bytes.toBytes(row.getDouble(pos));
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
final int timestampPrecision = getPrecision(fieldType);
if (timestampPrecision < MIN_TIMESTAMP_PRECISION
|| timestampPrecision > MAX_TIMESTAMP_PRECISION) {
throw new UnsupportedOperationException(
String.format(
"The precision %s of TIMESTAMP type is out of the range [%s, %s] supported by "
+ "HBase connector",
timestampPrecision,
MIN_TIMESTAMP_PRECISION,
MAX_TIMESTAMP_PRECISION));
}
return createTimestampEncoder(timestampPrecision);
default:
throw new UnsupportedOperationException("Unsupported type: " + fieldType);
}
}
private static FieldEncoder createDecimalEncoder(DecimalType decimalType) {
final int precision = decimalType.getPrecision();
final int scale = decimalType.getScale();
return (row, pos) -> {
BigDecimal decimal = row.getDecimal(pos, precision, scale).toBigDecimal();
return Bytes.toBytes(decimal);
};
}
private static FieldEncoder createTimestampEncoder(final int precision) {
return (row, pos) -> {
long millisecond = row.getTimestamp(pos, precision).getMillisecond();
return Bytes.toBytes(millisecond);
};
}
// ------------------------------------------------------------------------------------
// HBase Runtime Decoders
// ------------------------------------------------------------------------------------
/** Runtime decoder that decodes a byte[] into objects of internal data structure. */
@FunctionalInterface
private interface FieldDecoder extends Serializable {
@Nullable
Object decode(byte[] value);
}
private static FieldDecoder createNullableFieldDecoder(
LogicalType fieldType, final byte[] nullStringBytes) {
final FieldDecoder decoder = createFieldDecoder(fieldType);
if (fieldType.isNullable()) {
if (fieldType.is(LogicalTypeFamily.CHARACTER_STRING)) {
return value -> {
if (value == null || Arrays.equals(value, nullStringBytes)) {
return null;
} else {
return decoder.decode(value);
}
};
} else {
return value -> {
if (value == null || value.length == 0) {
return null;
} else {
return decoder.decode(value);
}
};
}
} else {
return decoder;
}
}
private static FieldDecoder createFieldDecoder(LogicalType fieldType) {
// ordered by type root definition
switch (fieldType.getTypeRoot()) {
case CHAR:
case VARCHAR:
// reuse bytes
return StringData::fromBytes;
case BOOLEAN:
return Bytes::toBoolean;
case BINARY:
case VARBINARY:
return value -> value;
case DECIMAL:
return createDecimalDecoder((DecimalType) fieldType);
case TINYINT:
return value -> value[0];
case SMALLINT:
return Bytes::toShort;
case INTEGER:
case DATE:
case INTERVAL_YEAR_MONTH:
return Bytes::toInt;
case TIME_WITHOUT_TIME_ZONE:
final int timePrecision = getPrecision(fieldType);
if (timePrecision < MIN_TIME_PRECISION || timePrecision > MAX_TIME_PRECISION) {
throw new UnsupportedOperationException(
String.format(
"The precision %s of TIME type is out of the range [%s, %s] supported by "
+ "HBase connector",
timePrecision, MIN_TIME_PRECISION, MAX_TIME_PRECISION));
}
return Bytes::toInt;
case BIGINT:
case INTERVAL_DAY_TIME:
return Bytes::toLong;
case FLOAT:
return Bytes::toFloat;
case DOUBLE:
return Bytes::toDouble;
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
final int timestampPrecision = getPrecision(fieldType);
if (timestampPrecision < MIN_TIMESTAMP_PRECISION
|| timestampPrecision > MAX_TIMESTAMP_PRECISION) {
throw new UnsupportedOperationException(
String.format(
"The precision %s of TIMESTAMP type is out of the range [%s, %s] supported by "
+ "HBase connector",
timestampPrecision,
MIN_TIMESTAMP_PRECISION,
MAX_TIMESTAMP_PRECISION));
}
return createTimestampDecoder();
default:
throw new UnsupportedOperationException("Unsupported type: " + fieldType);
}
}
private static FieldDecoder createDecimalDecoder(DecimalType decimalType) {
final int precision = decimalType.getPrecision();
final int scale = decimalType.getScale();
return value -> {
BigDecimal decimal = Bytes.toBigDecimal(value);
return DecimalData.fromBigDecimal(decimal, precision, scale);
};
}
private static FieldDecoder createTimestampDecoder() {
return value -> {
// TODO: support higher precision
long milliseconds = Bytes.toLong(value);
return TimestampData.fromEpochMillis(milliseconds);
};
}
}
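// ---------------------------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file): wires an assumed schema
// into HBaseSerde and derives a Scan covering every mapped qualifier. The schema layout and the
// "null" string literal are assumptions for the example.
class HBaseSerdeUsageExample {
    static Scan exampleScan() {
        HBaseTableSchema schema = new HBaseTableSchema();
        schema.setRowKey("rowkey", String.class);
        schema.addColumn("cf", "name", String.class);
        HBaseSerde serde = new HBaseSerde(schema, "null"); // "null" marks null string values
        return serde.createScan();
    }
}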
| 3,936 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseTypeUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.util;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.hadoop.hbase.util.Bytes;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.charset.Charset;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Arrays;
import static org.apache.flink.table.types.logical.utils.LogicalTypeChecks.getPrecision;
/** A utility class to process data exchange with the HBase type system. */
@Internal
public class HBaseTypeUtils {
private static final byte[] EMPTY_BYTES = new byte[] {};
private static final int MIN_TIMESTAMP_PRECISION = 0;
private static final int MAX_TIMESTAMP_PRECISION = 3;
private static final int MIN_TIME_PRECISION = 0;
private static final int MAX_TIME_PRECISION = 3;
/** Deserialize byte array to Java Object with the given type. */
public static Object deserializeToObject(byte[] value, int typeIdx, Charset stringCharset) {
switch (typeIdx) {
case 0: // byte[]
return value;
case 1: // String
return Arrays.equals(EMPTY_BYTES, value) ? null : new String(value, stringCharset);
case 2: // byte
return value[0];
case 3:
return Bytes.toShort(value);
case 4:
return Bytes.toInt(value);
case 5:
return Bytes.toLong(value);
case 6:
return Bytes.toFloat(value);
case 7:
return Bytes.toDouble(value);
case 8:
return Bytes.toBoolean(value);
case 9: // sql.Timestamp encoded as long
return new Timestamp(Bytes.toLong(value));
case 10: // sql.Date encoded as long
return new Date(Bytes.toLong(value));
case 11: // sql.Time encoded as long
return new Time(Bytes.toLong(value));
case 12:
return Bytes.toBigDecimal(value);
case 13:
return new BigInteger(value);
default:
throw new IllegalArgumentException("unsupported type index:" + typeIdx);
}
}
/** Serialize the Java Object to byte array with the given type. */
public static byte[] serializeFromObject(Object value, int typeIdx, Charset stringCharset) {
switch (typeIdx) {
case 0: // byte[]
return (byte[]) value;
case 1: // external String
return value == null ? EMPTY_BYTES : ((String) value).getBytes(stringCharset);
case 2: // byte
return value == null ? EMPTY_BYTES : new byte[] {(byte) value};
case 3:
return Bytes.toBytes((short) value);
case 4:
return Bytes.toBytes((int) value);
case 5:
return Bytes.toBytes((long) value);
case 6:
return Bytes.toBytes((float) value);
case 7:
return Bytes.toBytes((double) value);
case 8:
return Bytes.toBytes((boolean) value);
case 9: // sql.Timestamp encoded to Long
return Bytes.toBytes(((Timestamp) value).getTime());
case 10: // sql.Date encoded as long
return Bytes.toBytes(((Date) value).getTime());
case 11: // sql.Time encoded as long
return Bytes.toBytes(((Time) value).getTime());
case 12:
return Bytes.toBytes((BigDecimal) value);
case 13:
return ((BigInteger) value).toByteArray();
default:
throw new IllegalArgumentException("unsupported type index:" + typeIdx);
}
}
/**
* Gets the type index (type representation in HBase connector) from the {@link
* TypeInformation}.
*/
public static int getTypeIndex(TypeInformation typeInfo) {
return getTypeIndex(typeInfo.getTypeClass());
}
/** Checks whether the given Class is a supported type in HBase connector. */
public static boolean isSupportedType(Class<?> clazz) {
return getTypeIndex(clazz) != -1;
}
private static int getTypeIndex(Class<?> clazz) {
if (byte[].class.equals(clazz)) {
return 0;
} else if (String.class.equals(clazz)) {
return 1;
} else if (Byte.class.equals(clazz)) {
return 2;
} else if (Short.class.equals(clazz)) {
return 3;
} else if (Integer.class.equals(clazz)) {
return 4;
} else if (Long.class.equals(clazz)) {
return 5;
} else if (Float.class.equals(clazz)) {
return 6;
} else if (Double.class.equals(clazz)) {
return 7;
} else if (Boolean.class.equals(clazz)) {
return 8;
} else if (Timestamp.class.equals(clazz)) {
return 9;
} else if (Date.class.equals(clazz)) {
return 10;
} else if (Time.class.equals(clazz)) {
return 11;
} else if (BigDecimal.class.equals(clazz)) {
return 12;
} else if (BigInteger.class.equals(clazz)) {
return 13;
} else {
return -1;
}
}
/** Checks whether the given {@link LogicalType} is supported in HBase connector. */
public static boolean isSupportedType(LogicalType type) {
// ordered by type root definition
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BOOLEAN:
case BINARY:
case VARBINARY:
case DECIMAL:
case TINYINT:
case SMALLINT:
case INTEGER:
case DATE:
case INTERVAL_YEAR_MONTH:
case BIGINT:
case INTERVAL_DAY_TIME:
case FLOAT:
case DOUBLE:
return true;
case TIME_WITHOUT_TIME_ZONE:
final int timePrecision = getPrecision(type);
if (timePrecision < MIN_TIME_PRECISION || timePrecision > MAX_TIME_PRECISION) {
throw new UnsupportedOperationException(
String.format(
"The precision %s of TIME type is out of the range [%s, %s] supported by "
+ "HBase connector",
timePrecision, MIN_TIME_PRECISION, MAX_TIME_PRECISION));
}
return true;
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
final int timestampPrecision = getPrecision(type);
if (timestampPrecision < MIN_TIMESTAMP_PRECISION
|| timestampPrecision > MAX_TIMESTAMP_PRECISION) {
throw new UnsupportedOperationException(
String.format(
"The precision %s of TIMESTAMP type is out of the range [%s, %s] supported by "
+ "HBase connector",
timestampPrecision,
MIN_TIMESTAMP_PRECISION,
MAX_TIMESTAMP_PRECISION));
}
return true;
case TIMESTAMP_WITH_TIME_ZONE:
case ARRAY:
case MULTISET:
case MAP:
case ROW:
case STRUCTURED_TYPE:
case DISTINCT_TYPE:
case RAW:
case NULL:
case SYMBOL:
case UNRESOLVED:
return false;
default:
throw new IllegalArgumentException();
}
}
}
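// ---------------------------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file): round-trips a String
// value through the index-based serialization helpers above. Index 1 corresponds to String in
// getTypeIndex; the charset choice is an assumption for the example.
class HBaseTypeUtilsUsageExample {
    static Object roundTripString() {
        Charset utf8 = Charset.forName("UTF-8");
        byte[] bytes = HBaseTypeUtils.serializeFromObject("hello", 1, utf8);
        return HBaseTypeUtils.deserializeToObject(bytes, 1, utf8); // yields "hello"
    }
}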
| 3,937 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.util;
import org.apache.flink.annotation.Internal;
import org.apache.flink.util.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
/** This class helps to do serialization for Hadoop Configuration and HBase-related classes. */
@Internal
public class HBaseConfigurationUtil {
private static final Logger LOG = LoggerFactory.getLogger(HBaseConfigurationUtil.class);
public static final String ENV_HBASE_CONF_DIR = "HBASE_CONF_DIR";
public static Configuration getHBaseConfiguration() {
// Instantiate an HBaseConfiguration to load the hbase-default.xml and hbase-site.xml from
// the classpath.
Configuration result = HBaseConfiguration.create();
boolean foundHBaseConfiguration = false;
// We need to load both hbase-default.xml and hbase-site.xml to the hbase configuration
// The properties of a newly added resource will override the ones in previous resources, so
// a configuration
// file with higher priority should be added later.
// Approach 1: HBASE_HOME environment variables
String possibleHBaseConfPath = null;
final String hbaseHome = System.getenv("HBASE_HOME");
if (hbaseHome != null) {
LOG.debug("Searching HBase configuration files in HBASE_HOME: {}", hbaseHome);
possibleHBaseConfPath = hbaseHome + "/conf";
}
if (possibleHBaseConfPath != null) {
foundHBaseConfiguration = addHBaseConfIfFound(result, possibleHBaseConfPath);
}
// Approach 2: HBASE_CONF_DIR environment variable
String hbaseConfDir = System.getenv("HBASE_CONF_DIR");
if (hbaseConfDir != null) {
LOG.debug("Searching HBase configuration files in HBASE_CONF_DIR: {}", hbaseConfDir);
foundHBaseConfiguration =
addHBaseConfIfFound(result, hbaseConfDir) || foundHBaseConfiguration;
}
if (!foundHBaseConfiguration) {
LOG.warn(
"Could not find HBase configuration via any of the supported methods "
+ "(Flink configuration, environment variables).");
}
return result;
}
/**
* Search HBase configuration files in the given path, and add them to the configuration if
* found.
*/
private static boolean addHBaseConfIfFound(
Configuration configuration, String possibleHBaseConfPath) {
boolean foundHBaseConfiguration = false;
if (new File(possibleHBaseConfPath).exists()) {
if (new File(possibleHBaseConfPath + "/hbase-default.xml").exists()) {
configuration.addResource(
new org.apache.hadoop.fs.Path(
possibleHBaseConfPath + "/hbase-default.xml"));
LOG.debug(
"Adding "
+ possibleHBaseConfPath
+ "/hbase-default.xml to hbase configuration");
foundHBaseConfiguration = true;
}
if (new File(possibleHBaseConfPath + "/hbase-site.xml").exists()) {
configuration.addResource(
new org.apache.hadoop.fs.Path(possibleHBaseConfPath + "/hbase-site.xml"));
LOG.debug(
"Adding "
+ possibleHBaseConfPath
+ "/hbase-site.xml to hbase configuration");
foundHBaseConfiguration = true;
}
}
return foundHBaseConfiguration;
}
/** Serialize a Hadoop {@link Configuration} into byte[]. */
public static byte[] serializeConfiguration(Configuration conf) {
try {
return serializeWritable(conf);
} catch (IOException e) {
throw new RuntimeException(
"Encounter an IOException when serialize the Configuration.", e);
}
}
/**
* Deserialize a Hadoop {@link Configuration} from byte[]. Deserialize configs to {@code
* targetConfig} if it is set.
*/
public static Configuration deserializeConfiguration(
byte[] serializedConfig, Configuration targetConfig) {
if (null == targetConfig) {
targetConfig = new Configuration();
}
try {
deserializeWritable(targetConfig, serializedConfig);
} catch (IOException e) {
throw new RuntimeException(
"Encounter an IOException when deserialize the Configuration.", e);
}
return targetConfig;
}
/**
* Serializes a writable into a byte[].
*
* @param <T> the type parameter
* @param writable the writable
* @return the byte [ ]
* @throws IOException the io exception
*/
private static <T extends Writable> byte[] serializeWritable(T writable) throws IOException {
Preconditions.checkArgument(writable != null);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream);
writable.write(outputStream);
return byteArrayOutputStream.toByteArray();
}
/**
* Deserialize writable.
*
* @param <T> the type parameter
* @param writable the writable
* @param bytes the bytes
* @throws IOException the io exception
*/
private static <T extends Writable> void deserializeWritable(T writable, byte[] bytes)
throws IOException {
Preconditions.checkArgument(writable != null);
Preconditions.checkArgument(bytes != null);
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes);
DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream);
writable.readFields(dataInputStream);
}
public static org.apache.hadoop.conf.Configuration createHBaseConf() {
org.apache.hadoop.conf.Configuration hbaseClientConf = HBaseConfiguration.create();
String hbaseConfDir = System.getenv(ENV_HBASE_CONF_DIR);
if (hbaseConfDir != null) {
if (new File(hbaseConfDir).exists()) {
String coreSite = hbaseConfDir + "/core-site.xml";
String hdfsSite = hbaseConfDir + "/hdfs-site.xml";
String hbaseSite = hbaseConfDir + "/hbase-site.xml";
if (new File(coreSite).exists()) {
hbaseClientConf.addResource(new org.apache.hadoop.fs.Path(coreSite));
LOG.info("Adding " + coreSite + " to hbase configuration");
}
if (new File(hdfsSite).exists()) {
hbaseClientConf.addResource(new org.apache.hadoop.fs.Path(hdfsSite));
LOG.info("Adding " + hdfsSite + " to hbase configuration");
}
if (new File(hbaseSite).exists()) {
hbaseClientConf.addResource(new org.apache.hadoop.fs.Path(hbaseSite));
LOG.info("Adding " + hbaseSite + " to hbase configuration");
}
} else {
LOG.warn(
"HBase config directory '{}' not found, cannot load HBase configuration.",
hbaseConfDir);
}
} else {
LOG.warn(
"{} env variable not found, cannot load HBase configuration.",
ENV_HBASE_CONF_DIR);
}
return hbaseClientConf;
}
}
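// ---------------------------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file): serializes the merged
// HBase configuration into byte[] (e.g. for shipping it to the cluster) and restores it again.
class HBaseConfigurationUtilUsageExample {
    static Configuration roundTrip() {
        Configuration clientConf = HBaseConfigurationUtil.getHBaseConfiguration();
        byte[] serialized = HBaseConfigurationUtil.serializeConfiguration(clientConf);
        // Passing null creates a fresh Configuration as the deserialization target.
        return HBaseConfigurationUtil.deserializeConfiguration(serialized, null);
    }
}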
| 3,938 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.core.io.LocatableInputSplit;
/**
* This class implements an input split for HBase. Each table input split corresponds to a key
* range (low, high). All references to "row" below refer to the key of the row.
*/
@Internal
public class TableInputSplit extends LocatableInputSplit {
private static final long serialVersionUID = 1L;
/** The name of the table to retrieve data from. */
private final byte[] tableName;
/** The start row of the split. */
private final byte[] startRow;
/** The end row of the split. */
private final byte[] endRow;
/**
* Creates a new table input split.
*
* @param splitNumber the number of the input split
* @param hostnames the names of the hosts storing the data the input split refers to
* @param tableName the name of the table to retrieve data from
* @param startRow the start row of the split
* @param endRow the end row of the split
*/
public TableInputSplit(
final int splitNumber,
final String[] hostnames,
final byte[] tableName,
final byte[] startRow,
final byte[] endRow) {
super(splitNumber, hostnames);
this.tableName = tableName;
this.startRow = startRow;
this.endRow = endRow;
}
/**
* Returns the table name.
*
* @return The table name.
*/
public byte[] getTableName() {
return this.tableName;
}
/**
* Returns the start row.
*
* @return The start row.
*/
public byte[] getStartRow() {
return this.startRow;
}
/**
* Returns the end row.
*
* @return The end row.
*/
public byte[] getEndRow() {
return this.endRow;
}
}
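// ---------------------------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file): constructs a split for
// an assumed table and key range; the host name, table name and keys are placeholder values.
class TableInputSplitUsageExample {
    static TableInputSplit exampleSplit() {
        return new TableInputSplit(
                0, // split number
                new String[] {"region-host-1"}, // hosts storing the data
                "orders".getBytes(java.nio.charset.StandardCharsets.UTF_8),
                "row-000".getBytes(java.nio.charset.StandardCharsets.UTF_8), // start row key
                "row-999".getBytes(java.nio.charset.StandardCharsets.UTF_8)); // end row key
    }
}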
| 3,939 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataLookupFunction.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.connector.hbase.util.HBaseSerde;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.LookupFunction;
import org.apache.flink.util.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
/**
* The HBaseRowDataLookupFunction is a standard user-defined table function. It can be used in
* the Table API and is also useful for temporal table joins in SQL. It looks up the result as
* {@link RowData}.
*/
@Internal
public class HBaseRowDataLookupFunction extends LookupFunction {
private static final Logger LOG = LoggerFactory.getLogger(HBaseRowDataLookupFunction.class);
private static final long serialVersionUID = 1L;
private final String hTableName;
private final byte[] serializedConfig;
private final HBaseTableSchema hbaseTableSchema;
private final String nullStringLiteral;
private transient Connection hConnection;
private transient HTable table;
private transient HBaseSerde serde;
private final int maxRetryTimes;
public HBaseRowDataLookupFunction(
Configuration configuration,
String hTableName,
HBaseTableSchema hbaseTableSchema,
String nullStringLiteral,
int maxRetryTimes) {
this.serializedConfig = HBaseConfigurationUtil.serializeConfiguration(configuration);
this.hTableName = hTableName;
this.hbaseTableSchema = hbaseTableSchema;
this.nullStringLiteral = nullStringLiteral;
this.maxRetryTimes = maxRetryTimes;
}
/**
* The invoke entry point of the lookup function.
*
* @param keyRow - A {@link RowData} that wraps the lookup keys. Currently only a single
*     rowkey is supported.
*/
@Override
public Collection<RowData> lookup(RowData keyRow) throws IOException {
for (int retry = 0; retry <= maxRetryTimes; retry++) {
try {
// TODO: The implementation of LookupFunction will pass a GenericRowData as key row
// and it's safe to cast for now. We need to update the logic once we improve the
// LookupFunction in the future.
Get get = serde.createGet(((GenericRowData) keyRow).getField(0));
if (get != null) {
Result result = table.get(get);
if (!result.isEmpty()) {
return Collections.singletonList(serde.convertToReusedRow(result));
}
}
break;
} catch (IOException e) {
LOG.error(String.format("HBase lookup error, retry times = %d", retry), e);
if (retry >= maxRetryTimes) {
throw new RuntimeException("Execution of HBase lookup failed.", e);
}
try {
Thread.sleep(1000 * retry);
} catch (InterruptedException e1) {
throw new RuntimeException(e1);
}
}
}
return Collections.emptyList();
}
private Configuration prepareRuntimeConfiguration() {
// create default configuration from current runtime env (`hbase-site.xml` in classpath)
// first,
// and overwrite configuration using serialized configuration from client-side env
// (`hbase-site.xml` in classpath).
// user params from client-side have the highest priority
Configuration runtimeConfig =
HBaseConfigurationUtil.deserializeConfiguration(
serializedConfig, HBaseConfigurationUtil.getHBaseConfiguration());
// do validation: check key option(s) in final runtime configuration
if (StringUtils.isNullOrWhitespaceOnly(runtimeConfig.get(HConstants.ZOOKEEPER_QUORUM))) {
LOG.error(
"can not connect to HBase without {} configuration",
HConstants.ZOOKEEPER_QUORUM);
throw new IllegalArgumentException(
"check HBase configuration failed, lost: '"
+ HConstants.ZOOKEEPER_QUORUM
+ "'!");
}
return runtimeConfig;
}
@Override
public void open(FunctionContext context) {
LOG.info("start open ...");
Configuration config = prepareRuntimeConfiguration();
try {
hConnection = ConnectionFactory.createConnection(config);
table = (HTable) hConnection.getTable(TableName.valueOf(hTableName));
} catch (TableNotFoundException tnfe) {
LOG.error("Table '{}' not found ", hTableName, tnfe);
throw new RuntimeException("HBase table '" + hTableName + "' not found.", tnfe);
} catch (IOException ioe) {
LOG.error("Exception while creating connection to HBase.", ioe);
throw new RuntimeException("Cannot create connection to HBase.", ioe);
}
this.serde = new HBaseSerde(hbaseTableSchema, nullStringLiteral);
LOG.info("end open.");
}
@Override
public void close() {
LOG.info("start close ...");
if (null != table) {
try {
table.close();
table = null;
} catch (IOException e) {
// ignore exception when close.
LOG.warn("exception when close table", e);
}
}
if (null != hConnection) {
try {
hConnection.close();
hConnection = null;
} catch (IOException e) {
// ignore exception when close.
LOG.warn("exception when close connection", e);
}
}
LOG.info("end close.");
}
@VisibleForTesting
public String getHTableName() {
return hTableName;
}
}
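// ---------------------------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file): constructs the lookup
// function for an assumed table. The table name, null-string literal and retry count are
// placeholder values; the configuration and schema are expected to come from the table factory.
class HBaseRowDataLookupFunctionUsageExample {
    static HBaseRowDataLookupFunction create(Configuration conf, HBaseTableSchema schema) {
        return new HBaseRowDataLookupFunction(conf, "orders", schema, "null", 3);
    }
}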
| 3,940 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseDynamicTableSource.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.connector.source.InputFormatProvider;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.ScanTableSource;
import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.connector.source.lookup.LookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.PartialCachingLookupProvider;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.hadoop.conf.Configuration;
import javax.annotation.Nullable;
import static org.apache.flink.util.Preconditions.checkArgument;
/** HBase table source implementation. */
@Internal
public abstract class AbstractHBaseDynamicTableSource
implements ScanTableSource, LookupTableSource, SupportsProjectionPushDown {
protected final Configuration conf;
protected final String tableName;
protected HBaseTableSchema hbaseSchema;
protected final String nullStringLiteral;
protected final int maxRetryTimes;
@Nullable protected final LookupCache cache;
public AbstractHBaseDynamicTableSource(
Configuration conf,
String tableName,
HBaseTableSchema hbaseSchema,
String nullStringLiteral,
int maxRetryTimes,
@Nullable LookupCache cache) {
this.conf = conf;
this.tableName = tableName;
this.hbaseSchema = hbaseSchema;
this.nullStringLiteral = nullStringLiteral;
this.maxRetryTimes = maxRetryTimes;
this.cache = cache;
}
@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
return InputFormatProvider.of(getInputFormat());
}
protected abstract InputFormat<RowData, ?> getInputFormat();
@Override
public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
checkArgument(
context.getKeys().length == 1 && context.getKeys()[0].length == 1,
"Currently, HBase table can only be lookup by single rowkey.");
checkArgument(
hbaseSchema.getRowKeyName().isPresent(),
"HBase schema must have a row key when used in lookup mode.");
checkArgument(
DataType.getFieldNames(hbaseSchema.convertToDataType())
.get(context.getKeys()[0][0])
.equals(hbaseSchema.getRowKeyName().get()),
"Currently, HBase table only supports lookup by rowkey field.");
HBaseRowDataLookupFunction lookupFunction =
new HBaseRowDataLookupFunction(
conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes);
if (cache != null) {
return PartialCachingLookupProvider.of(lookupFunction, cache);
} else {
return LookupFunctionProvider.of(lookupFunction);
}
}
@Override
public boolean supportsNestedProjection() {
// planner doesn't support nested projection push down yet.
return false;
}
@Override
public void applyProjection(int[][] projectedFields, DataType producedDataType) {
this.hbaseSchema =
HBaseTableSchema.fromDataType(
Projection.of(projectedFields).project(hbaseSchema.convertToDataType()));
}
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
@Override
public String asSummaryString() {
return "HBase";
}
// -------------------------------------------------------------------------------------------
@VisibleForTesting
public HBaseTableSchema getHBaseTableSchema() {
return this.hbaseSchema;
}
@VisibleForTesting
public int getMaxRetryTimes() {
return maxRetryTimes;
}
@VisibleForTesting
@Nullable
public LookupCache getCache() {
return cache;
}
}
| 3,941 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/HBaseSinkFunction.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.sink;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.util.StringUtils;
import org.apache.flink.util.concurrent.ExecutorThreadFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
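/*
 * Construction sketch (illustrative only; the table name, converter arguments and buffering
 * values below are assumptions that mirror the connector's documented defaults, and `schema` /
 * `physicalDataType` are placeholders):
 *
 *   HBaseSinkFunction<RowData> sink =
 *           new HBaseSinkFunction<>(
 *                   "my_table",                                    // HBase table name
 *                   HBaseConfigurationUtil.getHBaseConfiguration(),
 *                   new RowDataToMutationConverter(
 *                           schema, physicalDataType, Collections.emptyList(), "null", false),
 *                   2 * 1024 * 1024,                               // buffer-flush.max-size (bytes)
 *                   1000,                                          // buffer-flush.max-rows
 *                   1000);                                         // buffer-flush.interval (ms)
 */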
/**
* The sink function for HBase.
*
 * <p>This class leverages {@link BufferedMutator} to buffer multiple {@link
 * org.apache.hadoop.hbase.client.Mutation Mutations} before sending the requests to the cluster. The
* buffering strategy can be configured by {@code bufferFlushMaxSizeInBytes}, {@code
* bufferFlushMaxMutations} and {@code bufferFlushIntervalMillis}.
*/
@Internal
public class HBaseSinkFunction<T> extends RichSinkFunction<T>
implements CheckpointedFunction, BufferedMutator.ExceptionListener {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(HBaseSinkFunction.class);
private final String hTableName;
private final byte[] serializedConfig;
private final long bufferFlushMaxSizeInBytes;
private final long bufferFlushMaxMutations;
private final long bufferFlushIntervalMillis;
private final HBaseMutationConverter<T> mutationConverter;
private transient Connection connection;
private transient DeduplicatedMutator mutator;
private transient ScheduledExecutorService executor;
private transient ScheduledFuture scheduledFuture;
private transient AtomicLong numPendingRequests;
private transient volatile boolean closed = false;
/**
* This is set from inside the {@link BufferedMutator.ExceptionListener} if a {@link Throwable}
* was thrown.
*
* <p>Errors will be checked and rethrown before processing each input element, and when the
* sink is closed.
*/
private final AtomicReference<Throwable> failureThrowable = new AtomicReference<>();
public HBaseSinkFunction(
String hTableName,
org.apache.hadoop.conf.Configuration conf,
HBaseMutationConverter<T> mutationConverter,
long bufferFlushMaxSizeInBytes,
long bufferFlushMaxMutations,
long bufferFlushIntervalMillis) {
this.hTableName = hTableName;
// Configuration is not serializable
this.serializedConfig = HBaseConfigurationUtil.serializeConfiguration(conf);
this.mutationConverter = mutationConverter;
this.bufferFlushMaxSizeInBytes = bufferFlushMaxSizeInBytes;
this.bufferFlushMaxMutations = bufferFlushMaxMutations;
this.bufferFlushIntervalMillis = bufferFlushIntervalMillis;
}
@Override
public void open(Configuration parameters) throws Exception {
LOG.info("start open ...");
org.apache.hadoop.conf.Configuration config = prepareRuntimeConfiguration();
try {
this.mutationConverter.open();
this.numPendingRequests = new AtomicLong(0);
if (null == connection) {
this.connection = ConnectionFactory.createConnection(config);
}
TableName tableName = TableName.valueOf(hTableName);
if (!connection.getAdmin().tableExists(tableName)) {
throw new TableNotFoundException(tableName);
}
// create a parameter instance, set the table name and custom listener reference.
BufferedMutatorParams params = new BufferedMutatorParams(tableName).listener(this);
if (bufferFlushMaxSizeInBytes > 0) {
params.writeBufferSize(bufferFlushMaxSizeInBytes);
}
this.mutator =
new DeduplicatedMutator(
(int) bufferFlushMaxMutations, connection.getBufferedMutator(params));
if (bufferFlushIntervalMillis > 0 && bufferFlushMaxMutations != 1) {
this.executor =
Executors.newScheduledThreadPool(
1, new ExecutorThreadFactory("hbase-upsert-sink-flusher"));
this.scheduledFuture =
this.executor.scheduleWithFixedDelay(
() -> {
if (closed) {
return;
}
try {
flush();
} catch (Exception e) {
// fail the sink and skip the rest of the items
// if the failure handler decides to throw an exception
failureThrowable.compareAndSet(null, e);
}
},
bufferFlushIntervalMillis,
bufferFlushIntervalMillis,
TimeUnit.MILLISECONDS);
}
} catch (TableNotFoundException tnfe) {
LOG.error("The table " + hTableName + " not found ", tnfe);
throw new RuntimeException("HBase table '" + hTableName + "' not found.", tnfe);
} catch (IOException ioe) {
LOG.error("Exception while creating connection to HBase.", ioe);
throw new RuntimeException("Cannot create connection to HBase.", ioe);
}
LOG.info("end open.");
}
private org.apache.hadoop.conf.Configuration prepareRuntimeConfiguration() throws IOException {
        // Create the default configuration from the current runtime env (`hbase-site.xml` in the
        // classpath) first, then overwrite it with the serialized configuration shipped from the
        // client-side env (`hbase-site.xml` in the client classpath).
        // User params from the client side have the highest priority.
org.apache.hadoop.conf.Configuration runtimeConfig =
HBaseConfigurationUtil.deserializeConfiguration(
serializedConfig, HBaseConfigurationUtil.getHBaseConfiguration());
// do validation: check key option(s) in final runtime configuration
if (StringUtils.isNullOrWhitespaceOnly(runtimeConfig.get(HConstants.ZOOKEEPER_QUORUM))) {
LOG.error(
"Can not connect to HBase without {} configuration",
HConstants.ZOOKEEPER_QUORUM);
throw new IOException(
"Check HBase configuration failed, lost: '"
+ HConstants.ZOOKEEPER_QUORUM
+ "'!");
}
return runtimeConfig;
}
private void checkErrorAndRethrow() {
Throwable cause = failureThrowable.get();
if (cause != null) {
throw new RuntimeException("An error occurred in HBaseSink.", cause);
}
}
@SuppressWarnings("rawtypes")
@Override
public void invoke(T value, Context context) throws Exception {
checkErrorAndRethrow();
mutator.mutate(mutationConverter.convertToMutation(value));
        // Flush when the number of buffered mutations reaches the configured maximum.
if (bufferFlushMaxMutations > 0
&& numPendingRequests.incrementAndGet() >= bufferFlushMaxMutations) {
flush();
}
}
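    /**
     * Flushes all buffered mutations to HBase and rethrows any asynchronous failure recorded by
     * the scheduled flusher or the {@link BufferedMutator.ExceptionListener}.
     */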
private void flush() throws IOException {
// DeduplicatedMutator is thread-safe
mutator.flush();
numPendingRequests.set(0);
checkErrorAndRethrow();
}
@Override
public void close() throws Exception {
closed = true;
if (mutator != null) {
try {
mutator.close();
} catch (IOException e) {
LOG.warn("Exception occurs while closing HBase BufferedMutator.", e);
}
this.mutator = null;
}
if (connection != null) {
try {
connection.close();
} catch (IOException e) {
LOG.warn("Exception occurs while closing HBase Connection.", e);
}
this.connection = null;
}
if (scheduledFuture != null) {
scheduledFuture.cancel(false);
if (executor != null) {
executor.shutdownNow();
}
}
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
while (numPendingRequests.get() != 0) {
flush();
}
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
// nothing to do.
}
@Override
public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator)
throws RetriesExhaustedWithDetailsException {
// fail the sink and skip the rest of the items
// if the failure handler decides to throw an exception
failureThrowable.compareAndSet(null, exception);
}
/**
     * Thread-safe helper that groups mutations by row and keeps only the latest mutation per row.
     * For more info, see
* <a href="https://issues.apache.org/jira/browse/HBASE-8626">HBASE-8626</a>.
*/
private static class DeduplicatedMutator {
private final BufferedMutator mutator;
private final Map<ByteBuffer, Mutation> mutations;
DeduplicatedMutator(int size, BufferedMutator mutator) {
this.mutator = mutator;
this.mutations = new HashMap<>(size);
}
synchronized void mutate(Mutation current) {
ByteBuffer key = ByteBuffer.wrap(current.getRow());
Mutation old = mutations.get(key);
if (old == null || current.getTimeStamp() >= old.getTimeStamp()) {
mutations.put(key, current);
}
}
synchronized void flush() throws IOException {
mutator.mutate(new ArrayList<>(mutations.values()));
mutator.flush();
mutations.clear();
}
synchronized void close() throws IOException {
mutator.mutate(new ArrayList<>(mutations.values()));
mutator.close();
mutations.clear();
}
}
}
| 3,942 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/WritableMetadata.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.sink;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.hadoop.hbase.HConstants;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
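/*
 * Illustrative DDL sketch for the writable "timestamp" metadata column (a minimal sketch; the
 * table name, schema and connector options are assumptions, not taken from this repository):
 *
 *   CREATE TABLE hTable (
 *     rowkey STRING,
 *     family1 ROW<col1 INT>,
 *     ts TIMESTAMP_LTZ(3) METADATA FROM 'timestamp',
 *     PRIMARY KEY (rowkey) NOT ENFORCED
 *   ) WITH (
 *     'connector' = 'hbase-1.4',
 *     'table-name' = 'my_table',
 *     'zookeeper.quorum' = 'localhost:2181'
 *   );
 */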
/** Writable metadata for HBase. */
public abstract class WritableMetadata<T> implements Serializable {
private static final long serialVersionUID = 1L;
/**
* Returns the map of metadata keys and their corresponding data types that can be consumed by
     * the HBase sink for writing.
*
* <p>Note: All the supported writable metadata should be manually registered in it.
*/
public static Map<String, DataType> list() {
Map<String, DataType> metadataMap = new HashMap<>();
metadataMap.put(TimestampMetadata.KEY, TimestampMetadata.DATA_TYPE);
return Collections.unmodifiableMap(metadataMap);
}
public abstract T read(RowData row);
/** Timestamp metadata for HBase. */
public static class TimestampMetadata extends WritableMetadata<Long> {
public static final String KEY = "timestamp";
public static final DataType DATA_TYPE =
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).nullable();
private final int pos;
public TimestampMetadata(int pos) {
this.pos = pos;
}
@Override
public Long read(RowData row) {
if (pos < 0) {
return HConstants.LATEST_TIMESTAMP;
}
if (row.isNullAt(pos)) {
throw new IllegalArgumentException(
String.format("Writable metadata '%s' can not accept null value", KEY));
}
return row.getTimestamp(pos, 3).getMillisecond();
}
public static TimestampMetadata of(List<String> metadataKeys, DataType physicalDataType) {
int pos = metadataKeys.indexOf(TimestampMetadata.KEY);
if (pos < 0) {
return new TimestampMetadata(-1);
}
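            // Metadata columns are appended after all physical columns in the produced row, so
            // the physical field count is added to the key's position among the metadata keys.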
return new TimestampMetadata(
pos + physicalDataType.getLogicalType().getChildren().size());
}
}
}
| 3,943 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/RowDataToMutationConverter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.sink;
import org.apache.flink.connector.hbase.sink.WritableMetadata.TimestampMetadata;
import org.apache.flink.connector.hbase.util.HBaseSerde;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.RowKind;
import org.apache.hadoop.hbase.client.Mutation;
import java.util.List;
/**
* An implementation of {@link HBaseMutationConverter} which converts {@link RowData} into {@link
* Mutation}.
*/
public class RowDataToMutationConverter implements HBaseMutationConverter<RowData> {
private static final long serialVersionUID = 1L;
private final HBaseTableSchema schema;
private final String nullStringLiteral;
private final boolean ignoreNullValue;
private final TimestampMetadata timestampMetadata;
private transient HBaseSerde serde;
public RowDataToMutationConverter(
HBaseTableSchema schema,
DataType physicalDataType,
List<String> metadataKeys,
String nullStringLiteral,
boolean ignoreNullValue) {
this.schema = schema;
this.nullStringLiteral = nullStringLiteral;
this.ignoreNullValue = ignoreNullValue;
this.timestampMetadata = TimestampMetadata.of(metadataKeys, physicalDataType);
}
@Override
public void open() {
this.serde = new HBaseSerde(schema, nullStringLiteral, ignoreNullValue);
}
@Override
public Mutation convertToMutation(RowData record) {
Long timestamp = timestampMetadata.read(record);
RowKind kind = record.getRowKind();
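        // INSERT and UPDATE_AFTER rows become HBase Put mutations; UPDATE_BEFORE and DELETE rows
        // become Delete mutations.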
if (kind == RowKind.INSERT || kind == RowKind.UPDATE_AFTER) {
return serde.createPutMutation(record, timestamp);
} else {
return serde.createDeleteMutation(record, timestamp);
}
}
}
| 3,944 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.sink;
import org.apache.flink.annotation.Internal;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import java.io.Serializable;
/**
 * A converter used to convert the input record into an HBase {@link Mutation}.
*
* @param <T> type of input record.
*/
@Internal
public interface HBaseMutationConverter<T> extends Serializable {
    /** Initialization method for the function. It is called once before the conversion method is invoked. */
void open();
/**
* Converts the input record into HBase {@link Mutation}. A mutation can be a {@link Put} or
* {@link Delete}.
*/
Mutation convertToMutation(T record);
}
| 3,945 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/table/HBaseConnectorOptions.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.table;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.table.connector.source.lookup.LookupOptions;
import org.apache.flink.table.factories.FactoryUtil;
import java.time.Duration;
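/*
 * Typical usage in SQL DDL (a minimal sketch; the schema is illustrative and only a subset of the
 * options declared in this class is shown):
 *
 *   CREATE TABLE hTable (
 *     rowkey INT,
 *     family1 ROW<col1 INT>,
 *     PRIMARY KEY (rowkey) NOT ENFORCED
 *   ) WITH (
 *     'connector' = 'hbase-1.4',
 *     'table-name' = 'my_table',
 *     'zookeeper.quorum' = 'localhost:2181',
 *     'sink.buffer-flush.max-rows' = '1000'
 *   );
 */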
/** Options for the HBase connector. */
@PublicEvolving
public class HBaseConnectorOptions {
public static final ConfigOption<String> TABLE_NAME =
ConfigOptions.key("table-name")
.stringType()
.noDefaultValue()
.withDescription(
"The name of HBase table to connect. "
+ "By default, the table is in 'default' namespace. "
+ "To assign the table a specified namespace you need to use 'namespace:table'.");
public static final ConfigOption<String> ZOOKEEPER_QUORUM =
ConfigOptions.key("zookeeper.quorum")
.stringType()
.noDefaultValue()
.withDescription("The HBase Zookeeper quorum.");
public static final ConfigOption<String> ZOOKEEPER_ZNODE_PARENT =
ConfigOptions.key("zookeeper.znode.parent")
.stringType()
.defaultValue("/hbase")
.withDescription("The root dir in Zookeeper for HBase cluster.");
public static final ConfigOption<String> NULL_STRING_LITERAL =
ConfigOptions.key("null-string-literal")
.stringType()
.defaultValue("null")
.withDescription(
"Representation for null values for string fields. HBase source and "
+ "sink encodes/decodes empty bytes as null values for all types except string type.");
public static final ConfigOption<MemorySize> SINK_BUFFER_FLUSH_MAX_SIZE =
ConfigOptions.key("sink.buffer-flush.max-size")
.memoryType()
.defaultValue(MemorySize.parse("2mb"))
.withDescription(
"Writing option, maximum size in memory of buffered rows for each "
+ "writing request. This can improve performance for writing data to HBase database, "
+ "but may increase the latency. Can be set to '0' to disable it. ");
public static final ConfigOption<Integer> SINK_BUFFER_FLUSH_MAX_ROWS =
ConfigOptions.key("sink.buffer-flush.max-rows")
.intType()
.defaultValue(1000)
.withDescription(
"Writing option, maximum number of rows to buffer for each writing request. "
+ "This can improve performance for writing data to HBase database, but may increase the latency. "
+ "Can be set to '0' to disable it.");
public static final ConfigOption<Duration> SINK_BUFFER_FLUSH_INTERVAL =
ConfigOptions.key("sink.buffer-flush.interval")
.durationType()
.defaultValue(Duration.ofSeconds(1))
.withDescription(
"Writing option, the interval to flush any buffered rows. "
+ "This can improve performance for writing data to HBase database, but may increase the latency. "
+ "Can be set to '0' to disable it. Note, both 'sink.buffer-flush.max-size' and 'sink.buffer-flush.max-rows' "
+ "can be set to '0' with the flush interval set allowing for complete async processing of buffered actions.");
public static final ConfigOption<Boolean> SINK_IGNORE_NULL_VALUE =
ConfigOptions.key("sink.ignore-null-value")
.booleanType()
.defaultValue(false)
.withDescription("Writing option, whether ignore null value or not.");
public static final ConfigOption<Boolean> LOOKUP_ASYNC =
ConfigOptions.key("lookup.async")
.booleanType()
.defaultValue(false)
.withDescription("whether to set async lookup.");
/** @deprecated Please use {@link LookupOptions#PARTIAL_CACHE_MAX_ROWS} instead. */
@Deprecated
public static final ConfigOption<Long> LOOKUP_CACHE_MAX_ROWS =
ConfigOptions.key("lookup.cache.max-rows")
.longType()
.defaultValue(-1L)
.withDescription(
"the max number of rows of lookup cache, over this value, the oldest rows will "
+ "be eliminated. \"cache.max-rows\" and \"cache.ttl\" options must all be specified if any of them is "
+ "specified. Cache is not enabled as default.");
/** @deprecated Please use {@link LookupOptions#PARTIAL_CACHE_EXPIRE_AFTER_WRITE} instead. */
@Deprecated
public static final ConfigOption<Duration> LOOKUP_CACHE_TTL =
ConfigOptions.key("lookup.cache.ttl")
.durationType()
.defaultValue(Duration.ofSeconds(0))
.withDescription("the cache time to live.");
    /** @deprecated Please use {@link LookupOptions#MAX_RETRIES} instead. */
    @Deprecated
public static final ConfigOption<Integer> LOOKUP_MAX_RETRIES =
ConfigOptions.key("lookup.max-retries")
.intType()
.defaultValue(3)
.withDescription("the max retry times if lookup database failed.");
public static final ConfigOption<Integer> SINK_PARALLELISM = FactoryUtil.SINK_PARALLELISM;
private HBaseConnectorOptions() {}
}
| 3,946 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase
|
Create_ds/flink-connector-hbase/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/table/HBaseConnectorOptionsUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.types.DataType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import java.util.Map;
import java.util.Properties;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_INTERVAL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_MAX_ROWS;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_MAX_SIZE;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_IGNORE_NULL_VALUE;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_PARALLELISM;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.ZOOKEEPER_QUORUM;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.ZOOKEEPER_ZNODE_PARENT;
/** Utilities for {@link HBaseConnectorOptions}. */
@Internal
public class HBaseConnectorOptionsUtil {
/** Prefix for HBase specific properties. */
public static final String PROPERTIES_PREFIX = "properties.";
// --------------------------------------------------------------------------------------------
// Validation
// --------------------------------------------------------------------------------------------
/**
     * Checks that the HBase table has a row key defined. A row key is defined as an atomic type
     * field, while column families and qualifiers are defined as ROW type fields. There shouldn't
     * be multiple atomic type columns in the schema. The PRIMARY KEY constraint is optional; if it
     * exists, it must be defined on the single row key field.
*/
public static void validatePrimaryKey(DataType dataType, int[] primaryKeyIndexes) {
HBaseTableSchema hbaseSchema = HBaseTableSchema.fromDataType(dataType);
if (!hbaseSchema.getRowKeyName().isPresent()) {
throw new IllegalArgumentException(
"HBase table requires to define a row key field. "
+ "A row key field is defined as an atomic type, "
+ "column families and qualifiers are defined as ROW type.");
}
if (primaryKeyIndexes.length == 0) {
return;
}
if (primaryKeyIndexes.length > 1) {
throw new IllegalArgumentException(
"HBase table doesn't support a primary Key on multiple columns. "
+ "The primary key of HBase table must be defined on row key field.");
}
if (!hbaseSchema
.getRowKeyName()
.get()
.equals(DataType.getFieldNames(dataType).get(primaryKeyIndexes[0]))) {
throw new IllegalArgumentException(
"Primary key of HBase table must be defined on the row key field. "
+ "A row key field is defined as an atomic type, "
+ "column families and qualifiers are defined as ROW type.");
}
}
public static HBaseWriteOptions getHBaseWriteOptions(ReadableConfig tableOptions) {
HBaseWriteOptions.Builder builder = HBaseWriteOptions.builder();
builder.setBufferFlushIntervalMillis(
tableOptions.get(SINK_BUFFER_FLUSH_INTERVAL).toMillis());
builder.setBufferFlushMaxRows(tableOptions.get(SINK_BUFFER_FLUSH_MAX_ROWS));
builder.setBufferFlushMaxSizeInBytes(
tableOptions.get(SINK_BUFFER_FLUSH_MAX_SIZE).getBytes());
builder.setIgnoreNullValue(tableOptions.get(SINK_IGNORE_NULL_VALUE));
builder.setParallelism(tableOptions.getOptional(SINK_PARALLELISM).orElse(null));
return builder.build();
}
    /** Builds the HBase {@link Configuration} from the runtime environment and the table options. */
public static Configuration getHBaseConfiguration(ReadableConfig tableOptions) {
        // Create the default configuration from the current runtime env (`hbase-site.xml` in the
        // classpath) first.
Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, tableOptions.get(ZOOKEEPER_QUORUM));
hbaseClientConf.set(
HConstants.ZOOKEEPER_ZNODE_PARENT, tableOptions.get(ZOOKEEPER_ZNODE_PARENT));
// add HBase properties
final Properties properties =
getHBaseClientProperties(
((org.apache.flink.configuration.Configuration) tableOptions).toMap());
properties.forEach((k, v) -> hbaseClientConf.set(k.toString(), v.toString()));
return hbaseClientConf;
}
private static Properties getHBaseClientProperties(Map<String, String> tableOptions) {
final Properties hbaseProperties = new Properties();
if (containsHBaseClientProperties(tableOptions)) {
tableOptions.keySet().stream()
.filter(key -> key.startsWith(PROPERTIES_PREFIX))
.forEach(
key -> {
final String value = tableOptions.get(key);
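                                // Strip the 'properties.' prefix, e.g.
                                // 'properties.hbase.security.authentication' is passed to the
                                // HBase client as 'hbase.security.authentication'.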
final String subKey = key.substring((PROPERTIES_PREFIX).length());
hbaseProperties.put(subKey, value);
});
}
return hbaseProperties;
}
    /** Returns whether the table options contain HBase client properties (keys prefixed with 'properties.'). */
private static boolean containsHBaseClientProperties(Map<String, String> tableOptions) {
return tableOptions.keySet().stream().anyMatch(k -> k.startsWith(PROPERTIES_PREFIX));
}
private HBaseConnectorOptionsUtil() {}
}
| 3,947 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/slf4j
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.slf4j.impl;
/** Fake appender to work around HBase referring to it directly. */
public interface Log4jLoggerAdapter {}
| 3,948 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/** Architecture tests for test code. */
@AnalyzeClasses(
packages = "org.apache.flink.connector.hbase1",
importOptions = {
ImportOption.OnlyIncludeTests.class,
ImportOptions.ExcludeScalaImportOption.class,
ImportOptions.ExcludeShadedImportOption.class
})
public class TestCodeArchitectureTest {
@ArchTest
public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
}
| 3,949 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDynamicTableFactoryTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.source.HBaseRowDataLookupFunction;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink;
import org.apache.flink.connector.hbase1.source.HBaseDynamicTableSource;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.lookup.LookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.functions.LookupFunction;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;
import org.apache.flink.table.runtime.connector.source.LookupRuntimeProviderContext;
import org.apache.flink.table.types.DataType;
import org.apache.flink.util.ExceptionUtils;
import org.apache.commons.collections.IteratorUtils;
import org.apache.hadoop.hbase.HConstants;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import static org.apache.flink.connector.hbase.util.HBaseConfigurationUtil.getHBaseConfiguration;
import static org.apache.flink.table.api.DataTypes.BIGINT;
import static org.apache.flink.table.api.DataTypes.BOOLEAN;
import static org.apache.flink.table.api.DataTypes.DATE;
import static org.apache.flink.table.api.DataTypes.DECIMAL;
import static org.apache.flink.table.api.DataTypes.DOUBLE;
import static org.apache.flink.table.api.DataTypes.FIELD;
import static org.apache.flink.table.api.DataTypes.INT;
import static org.apache.flink.table.api.DataTypes.ROW;
import static org.apache.flink.table.api.DataTypes.STRING;
import static org.apache.flink.table.api.DataTypes.TIME;
import static org.apache.flink.table.api.DataTypes.TIMESTAMP;
import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink;
import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSource;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/** Unit test for {@link HBase1DynamicTableFactory}. */
public class HBaseDynamicTableFactoryTest {
private static final String FAMILY1 = "f1";
private static final String FAMILY2 = "f2";
private static final String FAMILY3 = "f3";
private static final String FAMILY4 = "f4";
private static final String COL1 = "c1";
private static final String COL2 = "c2";
private static final String COL3 = "c3";
private static final String COL4 = "c4";
private static final String ROWKEY = "rowkey";
@Rule public final ExpectedException thrown = ExpectedException.none();
@SuppressWarnings("rawtypes")
@Test
public void testTableSourceFactory() {
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(FAMILY1, ROW(FIELD(COL1, INT()))),
Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL2, BIGINT()))),
Column.physical(ROWKEY, BIGINT()),
Column.physical(
FAMILY3,
ROW(
FIELD(COL1, DOUBLE()),
FIELD(COL2, BOOLEAN()),
FIELD(COL3, STRING()))),
Column.physical(
FAMILY4,
ROW(
FIELD(COL1, DECIMAL(10, 3)),
FIELD(COL2, TIMESTAMP(3)),
FIELD(COL3, DATE()),
FIELD(COL4, TIME()))));
DynamicTableSource source = createTableSource(schema, getAllOptions());
assertTrue(source instanceof HBaseDynamicTableSource);
HBaseDynamicTableSource hbaseSource = (HBaseDynamicTableSource) source;
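        // Lookup key index 2 refers to the 'rowkey' column of the schema above; HBase lookups are
        // keyed on the row key field only.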
int[][] lookupKey = {{2}};
LookupTableSource.LookupRuntimeProvider lookupProvider =
hbaseSource.getLookupRuntimeProvider(new LookupRuntimeProviderContext(lookupKey));
assertTrue(lookupProvider instanceof LookupFunctionProvider);
LookupFunction tableFunction =
((LookupFunctionProvider) lookupProvider).createLookupFunction();
assertTrue(tableFunction instanceof HBaseRowDataLookupFunction);
assertEquals(
"testHBastTable", ((HBaseRowDataLookupFunction) tableFunction).getHTableName());
HBaseTableSchema hbaseSchema = hbaseSource.getHBaseTableSchema();
assertEquals(2, hbaseSchema.getRowKeyIndex());
assertEquals(Optional.of(Types.LONG), hbaseSchema.getRowKeyTypeInfo());
assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, hbaseSchema.getFamilyNames());
assertArrayEquals(new String[] {"c1"}, hbaseSchema.getQualifierNames("f1"));
assertArrayEquals(new String[] {"c1", "c2"}, hbaseSchema.getQualifierNames("f2"));
assertArrayEquals(new String[] {"c1", "c2", "c3"}, hbaseSchema.getQualifierNames("f3"));
assertArrayEquals(
new String[] {"c1", "c2", "c3", "c4"}, hbaseSchema.getQualifierNames("f4"));
assertArrayEquals(new DataType[] {INT()}, hbaseSchema.getQualifierDataTypes("f1"));
assertArrayEquals(
new DataType[] {INT(), BIGINT()}, hbaseSchema.getQualifierDataTypes("f2"));
assertArrayEquals(
new DataType[] {DOUBLE(), BOOLEAN(), STRING()},
hbaseSchema.getQualifierDataTypes("f3"));
assertArrayEquals(
new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
hbaseSchema.getQualifierDataTypes("f4"));
}
@Test
public void testLookupOptions() {
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
Map<String, String> options = getAllOptions();
options.put("lookup.cache", "PARTIAL");
options.put("lookup.partial-cache.expire-after-access", "15213s");
options.put("lookup.partial-cache.expire-after-write", "18213s");
options.put("lookup.partial-cache.max-rows", "10000");
options.put("lookup.partial-cache.cache-missing-key", "false");
options.put("lookup.max-retries", "15513");
DynamicTableSource source = createTableSource(schema, options);
HBaseDynamicTableSource hbaseSource = (HBaseDynamicTableSource) source;
assertThat(((HBaseDynamicTableSource) source).getMaxRetryTimes()).isEqualTo(15513);
assertThat(hbaseSource.getCache()).isInstanceOf(DefaultLookupCache.class);
DefaultLookupCache cache = (DefaultLookupCache) hbaseSource.getCache();
assertThat(cache)
.isEqualTo(
DefaultLookupCache.newBuilder()
.expireAfterAccess(Duration.ofSeconds(15213))
.expireAfterWrite(Duration.ofSeconds(18213))
.maximumSize(10000)
.cacheMissingKey(false)
.build());
}
@Test
public void testTableSinkFactory() {
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))),
Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL3, BIGINT()))),
Column.physical(
FAMILY3, ROW(FIELD(COL2, BOOLEAN()), FIELD(COL3, STRING()))),
Column.physical(
FAMILY4,
ROW(
FIELD(COL1, DECIMAL(10, 3)),
FIELD(COL2, TIMESTAMP(3)),
FIELD(COL3, DATE()),
FIELD(COL4, TIME()))));
DynamicTableSink sink = createTableSink(schema, getAllOptions());
assertTrue(sink instanceof HBaseDynamicTableSink);
HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;
HBaseTableSchema hbaseSchema = hbaseSink.getHBaseTableSchema();
assertEquals(0, hbaseSchema.getRowKeyIndex());
assertEquals(Optional.of(STRING()), hbaseSchema.getRowKeyDataType());
assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, hbaseSchema.getFamilyNames());
assertArrayEquals(new String[] {"c1", "c2"}, hbaseSchema.getQualifierNames("f1"));
assertArrayEquals(new String[] {"c1", "c3"}, hbaseSchema.getQualifierNames("f2"));
assertArrayEquals(new String[] {"c2", "c3"}, hbaseSchema.getQualifierNames("f3"));
assertArrayEquals(
new String[] {"c1", "c2", "c3", "c4"}, hbaseSchema.getQualifierNames("f4"));
assertArrayEquals(
new DataType[] {DOUBLE(), INT()}, hbaseSchema.getQualifierDataTypes("f1"));
assertArrayEquals(
new DataType[] {INT(), BIGINT()}, hbaseSchema.getQualifierDataTypes("f2"));
assertArrayEquals(
new DataType[] {BOOLEAN(), STRING()}, hbaseSchema.getQualifierDataTypes("f3"));
assertArrayEquals(
new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
hbaseSchema.getQualifierDataTypes("f4"));
// verify hadoop Configuration
org.apache.hadoop.conf.Configuration expectedConfiguration = getHBaseConfiguration();
expectedConfiguration.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
expectedConfiguration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/flink");
expectedConfiguration.set("hbase.security.authentication", "kerberos");
org.apache.hadoop.conf.Configuration actualConfiguration = hbaseSink.getConfiguration();
assertEquals(
IteratorUtils.toList(expectedConfiguration.iterator()),
IteratorUtils.toList(actualConfiguration.iterator()));
// verify tableName
assertEquals("testHBastTable", hbaseSink.getTableName());
HBaseWriteOptions expectedWriteOptions =
HBaseWriteOptions.builder()
.setBufferFlushMaxRows(1000)
.setBufferFlushIntervalMillis(1000)
.setBufferFlushMaxSizeInBytes(2 * 1024 * 1024)
.build();
HBaseWriteOptions actualWriteOptions = hbaseSink.getWriteOptions();
assertEquals(expectedWriteOptions, actualWriteOptions);
}
@Test
public void testBufferFlushOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.buffer-flush.max-size", "10mb");
options.put("sink.buffer-flush.max-rows", "100");
options.put("sink.buffer-flush.interval", "10s");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
HBaseWriteOptions expected =
HBaseWriteOptions.builder()
.setBufferFlushMaxRows(100)
.setBufferFlushIntervalMillis(10 * 1000)
.setBufferFlushMaxSizeInBytes(10 * 1024 * 1024)
.build();
HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
assertEquals(expected, actual);
}
@Test
public void testSinkIgnoreNullValueOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.ignore-null-value", "true");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
assertThat(actual.isIgnoreNullValue()).isTrue();
}
@Test
public void testParallelismOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.parallelism", "2");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
assertTrue(sink instanceof HBaseDynamicTableSink);
HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;
SinkFunctionProvider provider =
(SinkFunctionProvider)
hbaseSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
assertEquals(2, (long) provider.getParallelism().get());
}
@Test
public void testDisabledBufferFlushOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.buffer-flush.max-size", "0");
options.put("sink.buffer-flush.max-rows", "0");
options.put("sink.buffer-flush.interval", "0");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
HBaseWriteOptions expected =
HBaseWriteOptions.builder()
.setBufferFlushMaxRows(0)
.setBufferFlushIntervalMillis(0)
.setBufferFlushMaxSizeInBytes(0)
.build();
HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
assertEquals(expected, actual);
}
@Test
public void testUnknownOption() {
Map<String, String> options = getAllOptions();
options.put("sink.unknown.key", "unknown-value");
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))));
try {
createTableSource(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e, "Unsupported options:\n\nsink.unknown.key")
.isPresent());
}
try {
createTableSink(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e, "Unsupported options:\n\nsink.unknown.key")
.isPresent());
}
}
@Test
public void testTypeWithUnsupportedPrecision() {
Map<String, String> options = getAllOptions();
// test unsupported timestamp precision
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(
FAMILY1, ROW(FIELD(COL1, TIMESTAMP(6)), FIELD(COL2, INT()))));
try {
createTableSource(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIMESTAMP type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
try {
createTableSink(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIMESTAMP type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
// test unsupported time precision
schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, TIME(6)), FIELD(COL2, INT()))));
try {
createTableSource(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIME type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
try {
createTableSink(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIME type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
}
private Map<String, String> getAllOptions() {
Map<String, String> options = new HashMap<>();
options.put("connector", "hbase-1.4");
options.put("table-name", "testHBastTable");
options.put("zookeeper.quorum", "localhost:2181");
options.put("zookeeper.znode.parent", "/flink");
options.put("properties.hbase.security.authentication", "kerberos");
return options;
}
}
| 3,950 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTablePlanTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.planner.utils.StreamTableTestUtil;
import org.apache.flink.table.planner.utils.TableTestBase;
import org.junit.Test;
import static org.apache.flink.core.testutils.FlinkMatchers.containsCause;
/** Plan tests for HBase connector, for example, testing projection push down. */
public class HBaseTablePlanTest extends TableTestBase {
private final StreamTableTestUtil util = streamTestUtil(TableConfig.getDefault());
@Test
public void testMultipleRowKey() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " rowkey INT,"
+ " rowkey2 STRING "
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"Row key can't be set multiple times.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testNoneRowKey() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"HBase table requires to define a row key field. "
+ "A row key field is defined as an atomic type, "
+ "column families and qualifiers are defined as ROW type.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testInvalidPrimaryKey() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " rowkey STRING, "
+ " PRIMARY KEY (family1) NOT ENFORCED "
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"Primary key of HBase table must be defined on the row key field. "
+ "A row key field is defined as an atomic type, "
+ "column families and qualifiers are defined as ROW type.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testUnsupportedDataType() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " col1 ARRAY<STRING>, "
+ " rowkey STRING, "
+ " PRIMARY KEY (rowkey) NOT ENFORCED "
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"Unsupported field type 'ARRAY<STRING>' for HBase.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testProjectionPushDown() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
util.verifyExecPlan("SELECT h.family3, h.family2.col2 FROM hTable AS h");
}
}
| 3,951 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseConnectorITCase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
import org.apache.flink.connector.hbase.sink.RowDataToMutationConverter;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase1.source.AbstractTableInputFormat;
import org.apache.flink.connector.hbase1.source.HBaseRowDataInputFormat;
import org.apache.flink.connector.hbase1.util.HBaseTestBase;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.table.planner.factories.TestValuesTableFactory;
import org.apache.flink.test.util.TestBaseUtils;
import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.CollectionUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.table.api.Expressions.$;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
/** IT cases for HBase connector (source and sink). */
public class HBaseConnectorITCase extends HBaseTestBase {
// -------------------------------------------------------------------------------------
// HBaseTableSource tests
// -------------------------------------------------------------------------------------
@Test
public void testTableSourceFullScan() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table =
tEnv.sqlQuery(
"SELECT "
+ " h.family1.col1, "
+ " h.family2.col1, "
+ " h.family2.col2, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3 "
+ "FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[10, Hello-1, 100, 1.01, false, Welt-1]\n"
+ "+I[20, Hello-2, 200, 2.02, true, Welt-2]\n"
+ "+I[30, Hello-3, 300, 3.03, false, Welt-3]\n"
+ "+I[40, null, 400, 4.04, true, Welt-4]\n"
+ "+I[50, Hello-5, 500, 5.05, false, Welt-5]\n"
+ "+I[60, Hello-6, 600, 6.06, true, Welt-6]\n"
+ "+I[70, Hello-7, 700, 7.07, false, Welt-7]\n"
+ "+I[80, null, 800, 8.08, true, Welt-8]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceEmptyTableScan() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_EMPTY_TABLE
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table = tEnv.sqlQuery("SELECT rowkey, h.family1.col1 FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
assertThat(results).isEmpty();
}
@Test
public void testTableSourceProjection() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table =
tEnv.sqlQuery(
"SELECT "
+ " h.family1.col1, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3 "
+ "FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[10, 1.01, false, Welt-1]\n"
+ "+I[20, 2.02, true, Welt-2]\n"
+ "+I[30, 3.03, false, Welt-3]\n"
+ "+I[40, 4.04, true, Welt-4]\n"
+ "+I[50, 5.05, false, Welt-5]\n"
+ "+I[60, 6.06, true, Welt-6]\n"
+ "+I[70, 7.07, false, Welt-7]\n"
+ "+I[80, 8.08, true, Welt-8]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceFieldOrder() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " rowkey INT PRIMARY KEY NOT ENFORCED,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " family1 ROW<col1 INT>"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table = tEnv.sqlQuery("SELECT * FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[1, +I[Hello-1, 100], +I[1.01, false, Welt-1], +I[10]]\n"
+ "+I[2, +I[Hello-2, 200], +I[2.02, true, Welt-2], +I[20]]\n"
+ "+I[3, +I[Hello-3, 300], +I[3.03, false, Welt-3], +I[30]]\n"
+ "+I[4, +I[null, 400], +I[4.04, true, Welt-4], +I[40]]\n"
+ "+I[5, +I[Hello-5, 500], +I[5.05, false, Welt-5], +I[50]]\n"
+ "+I[6, +I[Hello-6, 600], +I[6.06, true, Welt-6], +I[60]]\n"
+ "+I[7, +I[Hello-7, 700], +I[7.07, false, Welt-7], +I[70]]\n"
+ "+I[8, +I[null, 800], +I[8.08, true, Welt-8], +I[80]]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceReadAsByteArray() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family2 ROW<col1 BYTES, col2 BYTES>,"
+ " rowkey INT"
+ // no primary key syntax
") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
tEnv.registerFunction("toUTF8", new ToUTF8());
tEnv.registerFunction("toLong", new ToLong());
Table table =
tEnv.sqlQuery(
"SELECT "
+ " toUTF8(h.family2.col1), "
+ " toLong(h.family2.col2) "
+ "FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[Hello-1, 100]\n"
+ "+I[Hello-2, 200]\n"
+ "+I[Hello-3, 300]\n"
+ "+I[null, 400]\n"
+ "+I[Hello-5, 500]\n"
+ "+I[Hello-6, 600]\n"
+ "+I[Hello-7, 700]\n"
+ "+I[null, 800]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSink() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
// register HBase table testTable1 which contains test data
String table1DDL = createHBaseTableDDL(TEST_TABLE_1, false);
tEnv.executeSql(table1DDL);
String table2DDL = createHBaseTableDDL(TEST_TABLE_2, false);
tEnv.executeSql(table2DDL);
String query =
"INSERT INTO "
+ TEST_TABLE_2
+ " SELECT"
+ " rowkey,"
+ " family1,"
+ " family2,"
+ " family3"
+ " FROM "
+ TEST_TABLE_1;
tEnv.executeSql(query).await();
// start a batch scan job to verify contents in HBase table
TableEnvironment batchEnv = TableEnvironment.create(batchSettings);
batchEnv.executeSql(table2DDL);
Table table =
batchEnv.sqlQuery(
"SELECT "
+ " h.rowkey, "
+ " h.family1.col1, "
+ " h.family2.col1, "
+ " h.family2.col2, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3 "
+ "FROM "
+ TEST_TABLE_2
+ " AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[1, 10, Hello-1, 100, 1.01, false, Welt-1]\n"
+ "+I[2, 20, Hello-2, 200, 2.02, true, Welt-2]\n"
+ "+I[3, 30, Hello-3, 300, 3.03, false, Welt-3]\n"
+ "+I[4, 40, null, 400, 4.04, true, Welt-4]\n"
+ "+I[5, 50, Hello-5, 500, 5.05, false, Welt-5]\n"
+ "+I[6, 60, Hello-6, 600, 6.06, true, Welt-6]\n"
+ "+I[7, 70, Hello-7, 700, 7.07, false, Welt-7]\n"
+ "+I[8, 80, null, 800, 8.08, true, Welt-8]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSinkWithChangelog() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
// register values table for source
String dataId =
TestValuesTableFactory.registerData(
Arrays.asList(
Row.ofKind(RowKind.INSERT, 1, Row.of("Hello1")),
Row.ofKind(RowKind.DELETE, 1, Row.of("Hello2")),
Row.ofKind(RowKind.INSERT, 2, Row.of("Hello1")),
Row.ofKind(RowKind.INSERT, 2, Row.of("Hello2")),
Row.ofKind(RowKind.INSERT, 2, Row.of("Hello3")),
Row.ofKind(RowKind.DELETE, 2, Row.of("Hello3")),
Row.ofKind(RowKind.INSERT, 1, Row.of("Hello3"))));
tEnv.executeSql(
"CREATE TABLE source_table ("
+ " rowkey INT,"
+ " family1 ROW<name STRING>,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'values',"
+ " 'data-id' = '"
+ dataId
+ "',"
+ " 'changelog-mode'='I,UA,UB,D'"
+ ")");
// register HBase table for sink
tEnv.executeSql(
"CREATE TABLE sink_table ("
+ " rowkey INT,"
+ " family1 ROW<name STRING>,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_4
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
tEnv.executeSql("INSERT INTO sink_table SELECT * FROM source_table").await();
TableResult result = tEnv.executeSql("SELECT * FROM sink_table");
List<Row> actual = CollectionUtil.iteratorToList(result.collect());
assertThat(actual).isEqualTo(Collections.singletonList(Row.of(1, Row.of("Hello3"))));
}
@Test
public void testTableSinkWithTimestampMetadata() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
tEnv.executeSql(
"CREATE TABLE hTableForSink ("
+ " rowkey INT PRIMARY KEY NOT ENFORCED,"
+ " family1 ROW<col1 INT>,"
+ " version TIMESTAMP_LTZ(3) METADATA FROM 'timestamp'"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_5
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
String insert =
"INSERT INTO hTableForSink VALUES"
+ "(1, ROW(1), TO_TIMESTAMP_LTZ(1696767943270, 3)),"
+ "(2, ROW(2), TO_TIMESTAMP_LTZ(1696767943270, 3)),"
+ "(3, ROW(3), TO_TIMESTAMP_LTZ(1696767943270, 3)),"
+ "(1, ROW(10), TO_TIMESTAMP_LTZ(1696767943269, 3)),"
+ "(2, ROW(20), TO_TIMESTAMP_LTZ(1696767943271, 3))";
tEnv.executeSql(insert).await();
tEnv.executeSql(
"CREATE TABLE hTableForQuery ("
+ " rowkey INT PRIMARY KEY NOT ENFORCED,"
+ " family1 ROW<col1 INT>"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_5
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
TableResult result = tEnv.executeSql("SELECT rowkey, family1.col1 FROM hTableForQuery");
List<Row> results = CollectionUtil.iteratorToList(result.collect());
String expected = "+I[1, 1]\n+I[2, 20]\n+I[3, 3]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceSinkWithDDL() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
// register HBase table testTable1 which contains test data
String table1DDL = createHBaseTableDDL(TEST_TABLE_1, true);
tEnv.executeSql(table1DDL);
// register HBase table which is empty
String table3DDL = createHBaseTableDDL(TEST_TABLE_3, true);
tEnv.executeSql(table3DDL);
String insertStatement =
"INSERT INTO "
+ TEST_TABLE_3
+ " SELECT rowkey,"
+ " family1,"
+ " family2,"
+ " family3,"
+ " family4"
+ " from "
+ TEST_TABLE_1;
tEnv.executeSql(insertStatement).await();
// start a batch scan job to verify contents in HBase table
TableEnvironment batchEnv = TableEnvironment.create(batchSettings);
batchEnv.executeSql(table3DDL);
String query =
"SELECT "
+ " h.rowkey, "
+ " h.family1.col1, "
+ " h.family2.col1, "
+ " h.family2.col2, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3, "
+ " h.family4.col1, "
+ " h.family4.col2, "
+ " h.family4.col3, "
+ " h.family4.col4 "
+ " FROM "
+ TEST_TABLE_3
+ " AS h";
Iterator<Row> collected = tEnv.executeSql(query).collect();
List<String> result =
CollectionUtil.iteratorToList(collected).stream()
.map(Row::toString)
.sorted()
.collect(Collectors.toList());
List<String> expected = new ArrayList<>();
expected.add(
"+I[1, 10, Hello-1, 100, 1.01, false, Welt-1, 2019-08-18T19:00, 2019-08-18, 19:00, 12345678.0001]");
expected.add(
"+I[2, 20, Hello-2, 200, 2.02, true, Welt-2, 2019-08-18T19:01, 2019-08-18, 19:01, 12345678.0002]");
expected.add(
"+I[3, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
expected.add(
"+I[4, 40, null, 400, 4.04, true, Welt-4, 2019-08-18T19:03, 2019-08-18, 19:03, 12345678.0004]");
expected.add(
"+I[5, 50, Hello-5, 500, 5.05, false, Welt-5, 2019-08-19T19:10, 2019-08-19, 19:10, 12345678.0005]");
expected.add(
"+I[6, 60, Hello-6, 600, 6.06, true, Welt-6, 2019-08-19T19:20, 2019-08-19, 19:20, 12345678.0006]");
expected.add(
"+I[7, 70, Hello-7, 700, 7.07, false, Welt-7, 2019-08-19T19:30, 2019-08-19, 19:30, 12345678.0007]");
expected.add(
"+I[8, 80, null, 800, 8.08, true, Welt-8, 2019-08-19T19:40, 2019-08-19, 19:40, 12345678.0008]");
assertEquals(expected, result);
}
@Test
public void testHBaseLookupTableSource() {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
tEnv.executeSql(
"CREATE TABLE "
+ TEST_TABLE_1
+ " ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " family4 ROW<col1 TIMESTAMP(3), col2 DATE, col3 TIME(3), col4 DECIMAL(12, 4)>,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-1.4',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
// prepare a source table
String srcTableName = "src";
DataStream<Row> srcDs = execEnv.fromCollection(testData).returns(testTypeInfo);
Table in = tEnv.fromDataStream(srcDs, $("a"), $("b"), $("c"), $("proc").proctime());
tEnv.registerTable(srcTableName, in);
// perform a temporal table join query
String dimJoinQuery =
"SELECT"
+ " a,"
+ " b,"
+ " h.family1.col1,"
+ " h.family2.col1,"
+ " h.family2.col2,"
+ " h.family3.col1,"
+ " h.family3.col2,"
+ " h.family3.col3,"
+ " h.family4.col1,"
+ " h.family4.col2,"
+ " h.family4.col3,"
+ " h.family4.col4 "
+ " FROM src JOIN "
+ TEST_TABLE_1
+ " FOR SYSTEM_TIME AS OF src.proc as h ON src.a = h.rowkey";
Iterator<Row> collected = tEnv.executeSql(dimJoinQuery).collect();
List<String> result =
CollectionUtil.iteratorToList(collected).stream()
.map(Row::toString)
.sorted()
.collect(Collectors.toList());
List<String> expected = new ArrayList<>();
expected.add(
"+I[1, 1, 10, Hello-1, 100, 1.01, false, Welt-1, 2019-08-18T19:00, 2019-08-18, 19:00, 12345678.0001]");
expected.add(
"+I[2, 2, 20, Hello-2, 200, 2.02, true, Welt-2, 2019-08-18T19:01, 2019-08-18, 19:01, 12345678.0002]");
expected.add(
"+I[3, 2, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
expected.add(
"+I[3, 3, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
assertEquals(expected, result);
}
@Test
public void testTableInputFormatOpenClose() throws IOException {
HBaseTableSchema tableSchema = new HBaseTableSchema();
tableSchema.addColumn(FAMILY1, F1COL1, byte[].class);
AbstractTableInputFormat<?> inputFormat =
new HBaseRowDataInputFormat(getConf(), TEST_TABLE_1, tableSchema, "null");
inputFormat.open(inputFormat.createInputSplits(1)[0]);
assertNotNull(inputFormat.getConnection());
assertNotNull(inputFormat.getConnection().getTable(TableName.valueOf(TEST_TABLE_1)));
inputFormat.close();
assertNull(inputFormat.getConnection());
}
@Test
public void testTableInputFormatTableExistence() throws IOException {
HBaseTableSchema tableSchema = new HBaseTableSchema();
tableSchema.addColumn(FAMILY1, F1COL1, byte[].class);
AbstractTableInputFormat<?> inputFormat =
new HBaseRowDataInputFormat(getConf(), TEST_NOT_EXISTS_TABLE, tableSchema, "null");
assertThatThrownBy(() -> inputFormat.createInputSplits(1))
.isExactlyInstanceOf(TableNotFoundException.class);
inputFormat.close();
assertNull(inputFormat.getConnection());
}
@Test
public void testHBaseSinkFunctionTableExistence() throws Exception {
org.apache.hadoop.conf.Configuration hbaseConf =
HBaseConfigurationUtil.getHBaseConfiguration();
hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, getZookeeperQuorum());
hbaseConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");
HBaseTableSchema tableSchema = new HBaseTableSchema();
tableSchema.addColumn(FAMILY1, F1COL1, byte[].class);
HBaseSinkFunction<RowData> sinkFunction =
new HBaseSinkFunction<>(
TEST_NOT_EXISTS_TABLE,
hbaseConf,
new RowDataToMutationConverter(
tableSchema,
tableSchema.convertToDataType(),
Collections.emptyList(),
"null",
false),
2 * 1024 * 1024,
1000,
1000);
assertThatThrownBy(() -> sinkFunction.open(new Configuration()))
.getRootCause()
.isExactlyInstanceOf(TableNotFoundException.class);
sinkFunction.close();
}
// -------------------------------------------------------------------------------------
// HBase lookup source tests
// -------------------------------------------------------------------------------------
// prepare a source collection.
private static final List<Row> testData = new ArrayList<>();
private static final RowTypeInfo testTypeInfo =
new RowTypeInfo(
new TypeInformation[] {Types.INT, Types.LONG, Types.STRING},
new String[] {"a", "b", "c"});
static {
testData.add(Row.of(1, 1L, "Hi"));
testData.add(Row.of(2, 2L, "Hello"));
testData.add(Row.of(3, 2L, "Hello world"));
testData.add(Row.of(3, 3L, "Hello world!"));
}
// ------------------------------- Utilities -------------------------------------------------
/** A {@link ScalarFunction} that maps byte arrays to UTF-8 strings. */
public static class ToUTF8 extends ScalarFunction {
private static final long serialVersionUID = 1L;
public String eval(byte[] bytes) {
return Bytes.toString(bytes);
}
}
/** A {@link ScalarFunction} that maps byte arrays to longs. */
public static class ToLong extends ScalarFunction {
private static final long serialVersionUID = 1L;
public long eval(byte[] bytes) {
return Bytes.toLong(bytes);
}
}
private String createHBaseTableDDL(String tableName, boolean testTimeAndDecimalTypes) {
StringBuilder family4Statement = new StringBuilder();
if (testTimeAndDecimalTypes) {
family4Statement.append(", family4 ROW<col1 TIMESTAMP(3)");
family4Statement.append(", col2 DATE");
family4Statement.append(", col3 TIME(3)");
family4Statement.append(", col4 DECIMAL(12, 4)");
family4Statement.append("> \n");
}
return "CREATE TABLE "
+ tableName
+ "(\n"
+ " rowkey INT,"
+ " family1 ROW<col1 INT>,\n"
+ " family2 ROW<col1 VARCHAR, col2 BIGINT>,\n"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 VARCHAR>"
+ family4Statement.toString()
+ ") WITH (\n"
+ " 'connector' = 'hbase-1.4',\n"
+ " 'table-name' = '"
+ tableName
+ "',\n"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "',\n"
+ " 'zookeeper.znode.parent' = '/hbase' "
+ ")";
}
}
| 3,952 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1.util;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import static org.apache.flink.table.utils.DateTimeUtils.toInternal;
/** Abstract IT case class for HBase. */
public abstract class HBaseTestBase extends HBaseTestingClusterAutoStarter {
protected static final String TEST_TABLE_1 = "testTable1";
protected static final String TEST_TABLE_2 = "testTable2";
protected static final String TEST_TABLE_3 = "testTable3";
protected static final String TEST_TABLE_4 = "testTable4";
protected static final String TEST_TABLE_5 = "testTable5";
protected static final String TEST_EMPTY_TABLE = "testEmptyTable";
protected static final String TEST_NOT_EXISTS_TABLE = "notExistsTable";
protected static final String ROW_KEY = "rowkey";
protected static final String FAMILY1 = "family1";
protected static final String F1COL1 = "col1";
protected static final String FAMILY2 = "family2";
protected static final String F2COL1 = "col1";
protected static final String F2COL2 = "col2";
protected static final String FAMILY3 = "family3";
protected static final String F3COL1 = "col1";
protected static final String F3COL2 = "col2";
protected static final String F3COL3 = "col3";
protected static final String FAMILY4 = "family4";
protected static final String F4COL1 = "col1";
protected static final String F4COL2 = "col2";
protected static final String F4COL3 = "col3";
protected static final String F4COL4 = "col4";
private static final byte[][] FAMILIES =
new byte[][] {
Bytes.toBytes(FAMILY1),
Bytes.toBytes(FAMILY2),
Bytes.toBytes(FAMILY3),
Bytes.toBytes(FAMILY4)
};
private static final byte[][] SPLIT_KEYS = new byte[][] {Bytes.toBytes(4)};
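// A single split key pre-splits every test table into two regions (rowkeys below 4 and from 4
// upwards), so the scan tests also exercise multi-region input splits.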
protected EnvironmentSettings streamSettings;
protected EnvironmentSettings batchSettings;
@BeforeClass
public static void activateHBaseCluster() throws IOException {
prepareTables();
}
@Before
public void before() {
this.streamSettings = EnvironmentSettings.inStreamingMode();
this.batchSettings = EnvironmentSettings.inBatchMode();
}
private static void prepareTables() throws IOException {
createHBaseTable1();
createHBaseTable2();
createHBaseTable3();
createHBaseTable4();
createHBaseTable5();
createEmptyHBaseTable();
}
private static void createHBaseTable1() throws IOException {
// create a table
TableName tableName = TableName.valueOf(TEST_TABLE_1);
createTable(tableName, FAMILIES, SPLIT_KEYS);
// get the HTable instance
HTable table = openTable(tableName);
List<Put> puts = new ArrayList<>();
// add some data
puts.add(
putRow(
1,
10,
"Hello-1",
100L,
1.01,
false,
"Welt-1",
Timestamp.valueOf("2019-08-18 19:00:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:00:00"),
new BigDecimal("12345678.0001")));
puts.add(
putRow(
2,
20,
"Hello-2",
200L,
2.02,
true,
"Welt-2",
Timestamp.valueOf("2019-08-18 19:01:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:01:00"),
new BigDecimal("12345678.0002")));
puts.add(
putRow(
3,
30,
"Hello-3",
300L,
3.03,
false,
"Welt-3",
Timestamp.valueOf("2019-08-18 19:02:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:02:00"),
new BigDecimal("12345678.0003")));
puts.add(
putRow(
4,
40,
null,
400L,
4.04,
true,
"Welt-4",
Timestamp.valueOf("2019-08-18 19:03:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:03:00"),
new BigDecimal("12345678.0004")));
puts.add(
putRow(
5,
50,
"Hello-5",
500L,
5.05,
false,
"Welt-5",
Timestamp.valueOf("2019-08-19 19:10:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:10:00"),
new BigDecimal("12345678.0005")));
puts.add(
putRow(
6,
60,
"Hello-6",
600L,
6.06,
true,
"Welt-6",
Timestamp.valueOf("2019-08-19 19:20:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:20:00"),
new BigDecimal("12345678.0006")));
puts.add(
putRow(
7,
70,
"Hello-7",
700L,
7.07,
false,
"Welt-7",
Timestamp.valueOf("2019-08-19 19:30:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:30:00"),
new BigDecimal("12345678.0007")));
puts.add(
putRow(
8,
80,
null,
800L,
8.08,
true,
"Welt-8",
Timestamp.valueOf("2019-08-19 19:40:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:40:00"),
new BigDecimal("12345678.0008")));
// append rows to table
table.put(puts);
table.close();
}
private static void createHBaseTable2() {
// create a table
TableName tableName = TableName.valueOf(TEST_TABLE_2);
createTable(tableName, FAMILIES, SPLIT_KEYS);
}
private static void createHBaseTable3() {
// create a table
byte[][] families =
new byte[][] {
Bytes.toBytes(FAMILY1),
Bytes.toBytes(FAMILY2),
Bytes.toBytes(FAMILY3),
Bytes.toBytes(FAMILY4),
};
TableName tableName = TableName.valueOf(TEST_TABLE_3);
createTable(tableName, families, SPLIT_KEYS);
}
private static void createHBaseTable4() {
// create a table
byte[][] families = new byte[][] {Bytes.toBytes(FAMILY1)};
TableName tableName = TableName.valueOf(TEST_TABLE_4);
createTable(tableName, families, SPLIT_KEYS);
}
private static void createHBaseTable5() {
// create a table
byte[][] families = new byte[][] {Bytes.toBytes(FAMILY1)};
TableName tableName = TableName.valueOf(TEST_TABLE_5);
createTable(tableName, families, SPLIT_KEYS);
}
private static void createEmptyHBaseTable() {
// create a table
byte[][] families = new byte[][] {Bytes.toBytes(FAMILY1)};
TableName tableName = TableName.valueOf(TEST_EMPTY_TABLE);
createTable(tableName, families, SPLIT_KEYS);
}
private static Put putRow(
int rowKey,
int f1c1,
String f2c1,
long f2c2,
double f3c1,
boolean f3c2,
String f3c3,
Timestamp f4c1,
Date f4c2,
Time f4c3,
BigDecimal f4c4) {
Put put = new Put(Bytes.toBytes(rowKey));
// family 1
put.addColumn(Bytes.toBytes(FAMILY1), Bytes.toBytes(F1COL1), Bytes.toBytes(f1c1));
// family 2
if (f2c1 != null) {
put.addColumn(Bytes.toBytes(FAMILY2), Bytes.toBytes(F2COL1), Bytes.toBytes(f2c1));
}
put.addColumn(Bytes.toBytes(FAMILY2), Bytes.toBytes(F2COL2), Bytes.toBytes(f2c2));
// family 3
put.addColumn(Bytes.toBytes(FAMILY3), Bytes.toBytes(F3COL1), Bytes.toBytes(f3c1));
put.addColumn(Bytes.toBytes(FAMILY3), Bytes.toBytes(F3COL2), Bytes.toBytes(f3c2));
put.addColumn(Bytes.toBytes(FAMILY3), Bytes.toBytes(F3COL3), Bytes.toBytes(f3c3));
// family 4
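// The timestamp, date and time values are written in Flink's internal representations
// (epoch millis, days since epoch, millis of day) produced by toInternal(...).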
put.addColumn(
Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL1), Bytes.toBytes(toInternal(f4c1)));
put.addColumn(
Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL2), Bytes.toBytes(toInternal(f4c2)));
put.addColumn(
Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL3), Bytes.toBytes(toInternal(f4c3)));
put.addColumn(Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL4), Bytes.toBytes(f4c4));
return put;
}
}
| 3,953 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestingClusterAutoStarter.java
|
/*
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1.util;
import org.apache.flink.test.util.AbstractTestBase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.util.VersionUtil;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
* By using this class as the super class of a set of tests you will have an HBase testing cluster
* available that is well suited for writing tests that scan and filter data. It is usable by any
* downstream application because the HBase cluster is 'injected' by adding a dynamically generated
* hbase-site.xml to the classpath. Because of this classpath manipulation it is not possible to
* start a second testing cluster in the same JVM. So you should either put all HBase-related tests
* in a single class or force Surefire to set up a new JVM for each test class. See:
* http://maven.apache.org/surefire/maven-surefire-plugin/examples/fork-options-and-parallel-execution.html
*/
//
// NOTE: The code in this file is based on code from the
// Apache HBase project, licensed under the Apache License v 2.0
//
// https://github.com/apache/hbase/blob/master/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java
//
public abstract class HBaseTestingClusterAutoStarter extends AbstractTestBase {
private static final Log LOG = LogFactory.getLog(HBaseTestingClusterAutoStarter.class);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static HBaseAdmin admin = null;
private static List<TableName> createdTables = new ArrayList<>();
private static Configuration conf;
protected static void createTable(
TableName tableName, byte[][] columnFamilyName, byte[][] splitKeys) {
LOG.info("HBase minicluster: Creating table " + tableName.getNameAsString());
assertNotNull("HBaseAdmin is not initialized successfully.", admin);
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] fam : columnFamilyName) {
HColumnDescriptor colDef = new HColumnDescriptor(fam);
desc.addFamily(colDef);
}
try {
admin.createTable(desc, splitKeys);
createdTables.add(tableName);
assertTrue("Fail to create the table", admin.tableExists(tableName));
} catch (IOException e) {
assertNull("Exception found while creating table", e);
}
}
protected static HTable openTable(TableName tableName) throws IOException {
HTable table = (HTable) admin.getConnection().getTable(tableName);
assertTrue("Fail to create the table", admin.tableExists(tableName));
return table;
}
private static void deleteTables() {
if (admin != null) {
for (TableName tableName : createdTables) {
try {
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
} catch (IOException e) {
assertNull("Exception found deleting the table", e);
}
}
}
}
private static void initialize(Configuration c) {
conf = HBaseConfiguration.create(c);
// the default retry number in hbase-1.4 is 35; set it explicitly for the test
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 35);
try {
admin = TEST_UTIL.getHBaseAdmin();
} catch (MasterNotRunningException e) {
assertNull("Master is not running", e);
} catch (ZooKeeperConnectionException e) {
assertNull("Cannot connect to ZooKeeper", e);
} catch (IOException e) {
assertNull("IOException", e);
}
}
@BeforeClass
public static void setUp() throws Exception {
// HBase 1.4 does not work with Hadoop 3
// because it uses Guava 12.0.1, Hadoop 3 uses Guava 27.0-jre.
// There is no Guava version in between that works with both.
Assume.assumeTrue(
"This test is skipped for Hadoop versions above 3",
VersionUtil.compareVersions(System.getProperty("hadoop.version", "2.8.5"), "3.0.0")
< 0);
LOG.info("HBase minicluster: Starting");
TEST_UTIL.startMiniCluster(1);
// https://issues.apache.org/jira/browse/HBASE-11711
TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);
// Make sure the zookeeper quorum value contains the right port number (varies per run).
LOG.info("Hbase minicluster client port: " + TEST_UTIL.getZkCluster().getClientPort());
TEST_UTIL
.getConfiguration()
.set(
"hbase.zookeeper.quorum",
"localhost:" + TEST_UTIL.getZkCluster().getClientPort());
initialize(TEST_UTIL.getConfiguration());
LOG.info("HBase minicluster: Running");
}
/** Returns the zookeeper quorum value containing the right port number (varies per run). */
protected static String getZookeeperQuorum() {
return "localhost:" + TEST_UTIL.getZkCluster().getClientPort();
}
public static Configuration getConf() {
return conf;
}
@AfterClass
public static void tearDown() throws Exception {
if (conf == null) {
LOG.info("Skipping Hbase tear down. It was never started");
return;
}
LOG.info("HBase minicluster: Shutting down");
deleteTables();
TEST_UTIL.shutdownMiniCluster();
LOG.info("HBase minicluster: Down");
}
}
| 3,954 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1DynamicTableFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink;
import org.apache.flink.connector.hbase1.source.HBaseDynamicTableSource;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.lookup.LookupOptions;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper;
import org.apache.hadoop.conf.Configuration;
import java.time.Duration;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_ASYNC;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_CACHE_MAX_ROWS;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_CACHE_TTL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_MAX_RETRIES;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.NULL_STRING_LITERAL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_INTERVAL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_MAX_ROWS;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_MAX_SIZE;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_IGNORE_NULL_VALUE;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_PARALLELISM;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.TABLE_NAME;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.ZOOKEEPER_QUORUM;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.ZOOKEEPER_ZNODE_PARENT;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.PROPERTIES_PREFIX;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.getHBaseConfiguration;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.getHBaseWriteOptions;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.validatePrimaryKey;
import static org.apache.flink.table.factories.FactoryUtil.createTableFactoryHelper;
/** HBase connector factory. */
@Internal
public class HBase1DynamicTableFactory
implements DynamicTableSourceFactory, DynamicTableSinkFactory {
private static final String IDENTIFIER = "hbase-1.4";
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
TableFactoryHelper helper = createTableFactoryHelper(this, context);
helper.validateExcept(PROPERTIES_PREFIX);
final ReadableConfig tableOptions = helper.getOptions();
validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
String tableName = tableOptions.get(TABLE_NAME);
Configuration hbaseClientConf = getHBaseConfiguration(tableOptions);
String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
HBaseTableSchema hbaseSchema =
HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
LookupCache cache = null;
// Backward compatibility with legacy caching options
if (tableOptions.get(LOOKUP_CACHE_MAX_ROWS) > 0
&& tableOptions.get(LOOKUP_CACHE_TTL).compareTo(Duration.ZERO) > 0) {
cache =
DefaultLookupCache.newBuilder()
.maximumSize(tableOptions.get(LOOKUP_CACHE_MAX_ROWS))
.expireAfterWrite(tableOptions.get(LOOKUP_CACHE_TTL))
.build();
}
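// A partial cache configured through the new unified lookup options overrides any cache
// built from the legacy options above.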
if (tableOptions
.get(LookupOptions.CACHE_TYPE)
.equals(LookupOptions.LookupCacheType.PARTIAL)) {
cache = DefaultLookupCache.fromConfig(tableOptions);
}
return new HBaseDynamicTableSource(
hbaseClientConf,
tableName,
hbaseSchema,
nullStringLiteral,
tableOptions.get(LookupOptions.MAX_RETRIES),
cache);
}
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
TableFactoryHelper helper = createTableFactoryHelper(this, context);
helper.validateExcept(PROPERTIES_PREFIX);
final ReadableConfig tableOptions = helper.getOptions();
validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
String tableName = tableOptions.get(TABLE_NAME);
Configuration hbaseConf = getHBaseConfiguration(tableOptions);
HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
return new HBaseDynamicTableSink(
tableName,
context.getPhysicalRowDataType(),
hbaseConf,
hBaseWriteOptions,
nullStringLiteral);
}
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
Set<ConfigOption<?>> set = new HashSet<>();
set.add(TABLE_NAME);
set.add(ZOOKEEPER_QUORUM);
return set;
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> set = new HashSet<>();
set.add(ZOOKEEPER_ZNODE_PARENT);
set.add(NULL_STRING_LITERAL);
set.add(SINK_BUFFER_FLUSH_MAX_SIZE);
set.add(SINK_BUFFER_FLUSH_MAX_ROWS);
set.add(SINK_BUFFER_FLUSH_INTERVAL);
set.add(SINK_IGNORE_NULL_VALUE);
set.add(SINK_PARALLELISM);
set.add(LOOKUP_ASYNC);
set.add(LOOKUP_CACHE_MAX_ROWS);
set.add(LOOKUP_CACHE_TTL);
set.add(LOOKUP_MAX_RETRIES);
set.add(LookupOptions.CACHE_TYPE);
set.add(LookupOptions.MAX_RETRIES);
set.add(LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_ACCESS);
set.add(LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_WRITE);
set.add(LookupOptions.PARTIAL_CACHE_CACHE_MISSING_KEY);
set.add(LookupOptions.PARTIAL_CACHE_MAX_ROWS);
return set;
}
@Override
public Set<ConfigOption<?>> forwardOptions() {
return Stream.of(
TABLE_NAME,
ZOOKEEPER_ZNODE_PARENT,
ZOOKEEPER_QUORUM,
NULL_STRING_LITERAL,
SINK_BUFFER_FLUSH_MAX_SIZE,
SINK_BUFFER_FLUSH_MAX_ROWS,
SINK_BUFFER_FLUSH_INTERVAL,
SINK_IGNORE_NULL_VALUE,
LOOKUP_CACHE_MAX_ROWS,
LOOKUP_CACHE_TTL,
LOOKUP_MAX_RETRIES)
.collect(Collectors.toSet());
}
}
| 3,955 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseDynamicTableSource.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.connector.hbase.source.AbstractHBaseDynamicTableSource;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.hadoop.conf.Configuration;
import javax.annotation.Nullable;
import java.util.Objects;
/** HBase table source implementation. */
@Internal
public class HBaseDynamicTableSource extends AbstractHBaseDynamicTableSource {
public HBaseDynamicTableSource(
Configuration conf,
String tableName,
HBaseTableSchema hbaseSchema,
String nullStringLiteral,
int maxRetryTimes,
@Nullable LookupCache cache) {
super(conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes, cache);
}
@Override
public DynamicTableSource copy() {
return new HBaseDynamicTableSource(
conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes, cache);
}
@Override
public InputFormat<RowData, ?> getInputFormat() {
return new HBaseRowDataInputFormat(conf, tableName, hbaseSchema, nullStringLiteral);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof HBaseDynamicTableSource)) {
return false;
}
HBaseDynamicTableSource that = (HBaseDynamicTableSource) o;
return Objects.equals(conf, that.conf)
&& Objects.equals(tableName, that.tableName)
&& Objects.equals(hbaseSchema, that.hbaseSchema)
&& Objects.equals(nullStringLiteral, that.nullStringLiteral)
&& Objects.equals(maxRetryTimes, that.maxRetryTimes)
&& Objects.equals(cache, that.cache);
}
@Override
public int hashCode() {
return Objects.hash(conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes, cache);
}
}
| 3,956 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowDataInputFormat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1.source;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.connector.hbase.util.HBaseSerde;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.data.RowData;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* {@link InputFormat} subclass that wraps the access for HTables. Returns the result as {@link
* RowData}
*/
public class HBaseRowDataInputFormat extends AbstractTableInputFormat<RowData> {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(HBaseRowDataInputFormat.class);
private final String tableName;
private final HBaseTableSchema schema;
private final String nullStringLiteral;
private transient HBaseSerde serde;
public HBaseRowDataInputFormat(
org.apache.hadoop.conf.Configuration conf,
String tableName,
HBaseTableSchema schema,
String nullStringLiteral) {
super(conf);
this.tableName = tableName;
this.schema = schema;
this.nullStringLiteral = nullStringLiteral;
}
@Override
protected void initTable() throws IOException {
this.serde = new HBaseSerde(schema, nullStringLiteral);
if (table == null) {
connectToTable();
}
if (table != null && scan == null) {
scan = getScanner();
}
}
@Override
protected Scan getScanner() {
return serde.createScan();
}
@Override
public String getTableName() {
return tableName;
}
@Override
protected RowData mapResultToOutType(Result res) {
return serde.convertToReusedRow(res);
}
private void connectToTable() throws IOException {
connection = ConnectionFactory.createConnection(getHadoopConfiguration());
TableName name = TableName.valueOf(tableName);
if (!connection.getAdmin().tableExists(name)) {
throw new TableNotFoundException("HBase table '" + tableName + "' not found.");
}
table = (HTable) connection.getTable(name);
}
}
| 3,957 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/AbstractTableInputFormat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.api.common.io.LocatableInputSplitAssigner;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.common.io.statistics.BaseStatistics;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.hbase.source.TableInputSplit;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.core.io.InputSplitAssigner;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/** Abstract {@link InputFormat} to read data from HBase tables. */
@Internal
public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, TableInputSplit> {
protected static final Logger LOG = LoggerFactory.getLogger(AbstractTableInputFormat.class);
private static final long serialVersionUID = 1L;
// helper variable to decide whether the input is exhausted or not
protected boolean endReached = false;
protected transient Connection connection = null;
protected transient HTable table = null;
protected transient Scan scan = null;
/** HBase iterator wrapper. */
protected ResultScanner resultScanner = null;
protected byte[] currentRow;
protected long scannedRows;
// Configuration is not serializable
protected byte[] serializedConfig;
public AbstractTableInputFormat(org.apache.hadoop.conf.Configuration hConf) {
serializedConfig = HBaseConfigurationUtil.serializeConfiguration(hConf);
}
/**
* Creates a {@link Scan} object and opens the {@link HTable} connection to initialize the HBase
* table.
*
* @throws IOException Thrown, if the connection could not be opened due to an I/O problem.
*/
protected abstract void initTable() throws IOException;
/**
* Returns an instance of Scan that retrieves the required subset of records from the HBase
* table.
*
* @return The appropriate instance of Scan for this use case.
*/
protected abstract Scan getScanner();
/**
* Returns the name of the table to be read.
*
* <p>Per instance of a TableInputFormat derivative only a single table name is possible.
*
* @return The name of the table
*/
protected abstract String getTableName();
/**
* HBase returns an instance of {@link Result}.
*
* <p>This method maps the returned {@link Result} instance into the output type {@link T}.
*
* @param r The Result instance from HBase that needs to be converted
* @return The appropriate instance of {@link T} that contains the data of Result.
*/
protected abstract T mapResultToOutType(Result r);
@Override
public void configure(Configuration parameters) {}
protected org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
return HBaseConfigurationUtil.deserializeConfiguration(
serializedConfig, HBaseConfigurationUtil.getHBaseConfiguration());
}
/**
* Creates a {@link Scan} object and opens the {@link HTable} connection. The connection is
* opened in this method and closed in {@link #close()}.
*
* @param split The split to be opened.
* @throws IOException Thrown, if the split could not be opened due to an I/O problem.
*/
@Override
public void open(TableInputSplit split) throws IOException {
initTable();
if (split == null) {
throw new IOException("Input split is null!");
}
logSplitInfo("opening", split);
// set scan range
currentRow = split.getStartRow();
scan.setStartRow(currentRow);
scan.setStopRow(split.getEndRow());
resultScanner = table.getScanner(scan);
endReached = false;
scannedRows = 0;
}
public T nextRecord(T reuse) throws IOException {
if (resultScanner == null) {
throw new IOException("No table result scanner provided!");
}
Result res;
try {
res = resultScanner.next();
} catch (Exception e) {
resultScanner.close();
// workaround for timeout on scan
LOG.warn(
"Error after scan of " + scannedRows + " rows. Retry with a new scanner...", e);
scan.withStartRow(currentRow, false);
resultScanner = table.getScanner(scan);
res = resultScanner.next();
}
if (res != null) {
scannedRows++;
currentRow = res.getRow();
return mapResultToOutType(res);
}
endReached = true;
return null;
}
private void logSplitInfo(String action, TableInputSplit split) {
int splitId = split.getSplitNumber();
String splitStart = Bytes.toString(split.getStartRow());
String splitEnd = Bytes.toString(split.getEndRow());
String splitStartKey = splitStart.isEmpty() ? "-" : splitStart;
String splitStopKey = splitEnd.isEmpty() ? "-" : splitEnd;
String[] hostnames = split.getHostnames();
LOG.info(
"{} split (this={})[{}|{}|{}|{}]",
action,
this,
splitId,
hostnames,
splitStartKey,
splitStopKey);
}
@Override
public boolean reachedEnd() throws IOException {
return endReached;
}
@Override
public void close() throws IOException {
LOG.info("Closing split (scanned {} rows)", scannedRows);
currentRow = null;
try {
if (resultScanner != null) {
resultScanner.close();
}
closeTable();
} finally {
resultScanner = null;
}
}
public void closeTable() {
if (table != null) {
try {
table.close();
} catch (IOException e) {
LOG.warn("Exception occurs while closing HBase Table.", e);
}
table = null;
}
if (connection != null) {
try {
connection.close();
} catch (IOException e) {
LOG.warn("Exception occurs while closing HBase Connection.", e);
}
connection = null;
}
}
@Override
public TableInputSplit[] createInputSplits(final int minNumSplits) throws IOException {
try {
initTable();
// Get the starting and ending row keys for every region in the currently open table
final Pair<byte[][], byte[][]> keys = table.getRegionLocator().getStartEndKeys();
if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) {
return new TableInputSplit[] {};
}
final byte[] startRow = scan.getStartRow();
final byte[] stopRow = scan.getStopRow();
final boolean scanWithNoLowerBound = startRow.length == 0;
final boolean scanWithNoUpperBound = stopRow.length == 0;
final List<TableInputSplit> splits = new ArrayList<>(minNumSplits);
for (int i = 0; i < keys.getFirst().length; i++) {
final byte[] startKey = keys.getFirst()[i];
final byte[] endKey = keys.getSecond()[i];
final String regionLocation =
table.getRegionLocator()
.getRegionLocation(startKey, false)
.getHostnamePort();
// Test if the given region is to be included in the InputSplit while splitting the
// regions of a table
if (!includeRegionInScan(startKey, endKey)) {
continue;
}
// Find the region on which the given row is being served
final String[] hosts = new String[] {regionLocation};
// Determine if the region contains keys used by the scan
boolean isLastRegion = endKey.length == 0;
if ((scanWithNoLowerBound || isLastRegion || Bytes.compareTo(startRow, endKey) < 0)
&& (scanWithNoUpperBound || Bytes.compareTo(stopRow, startKey) > 0)) {
final byte[] splitStart =
scanWithNoLowerBound || Bytes.compareTo(startKey, startRow) >= 0
? startKey
: startRow;
final byte[] splitStop =
(scanWithNoUpperBound || Bytes.compareTo(endKey, stopRow) <= 0)
&& !isLastRegion
? endKey
: stopRow;
int id = splits.size();
final TableInputSplit split =
new TableInputSplit(
id, hosts, table.getTableName(), splitStart, splitStop);
splits.add(split);
}
}
LOG.info("Created " + splits.size() + " splits");
for (TableInputSplit split : splits) {
logSplitInfo("created", split);
}
return splits.toArray(new TableInputSplit[splits.size()]);
} finally {
closeTable();
}
}
/**
* Test if the given region is to be included in the scan while splitting the regions of a
* table.
*
* @param startKey Start key of the region
* @param endKey End key of the region
* @return true, if this region needs to be included as part of the input (default).
*/
protected boolean includeRegionInScan(final byte[] startKey, final byte[] endKey) {
return true;
}
@Override
public InputSplitAssigner getInputSplitAssigner(TableInputSplit[] inputSplits) {
return new LocatableInputSplitAssigner(inputSplits);
}
@Override
public BaseStatistics getStatistics(BaseStatistics cachedStatistics) {
return null;
}
@VisibleForTesting
public Connection getConnection() {
return connection;
}
}
| 3,958 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1
|
Create_ds/flink-connector-hbase/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseDynamicTableSink.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase1.sink;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
import org.apache.flink.connector.hbase.sink.RowDataToMutationConverter;
import org.apache.flink.connector.hbase.sink.WritableMetadata;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.RowKind;
import org.apache.hadoop.conf.Configuration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/** HBase table sink implementation. */
@Internal
public class HBaseDynamicTableSink implements DynamicTableSink, SupportsWritingMetadata {
private final HBaseTableSchema hbaseTableSchema;
private final String nullStringLiteral;
private final Configuration hbaseConf;
private final HBaseWriteOptions writeOptions;
private final String tableName;
private final DataType physicalDataType;
/** Metadata that is appended at the end of a physical sink row. */
private List<String> metadataKeys;
public HBaseDynamicTableSink(
String tableName,
DataType physicalDataType,
Configuration hbaseConf,
HBaseWriteOptions writeOptions,
String nullStringLiteral) {
this.tableName = tableName;
this.physicalDataType = physicalDataType;
this.hbaseTableSchema = HBaseTableSchema.fromDataType(physicalDataType);
this.metadataKeys = Collections.emptyList();
this.hbaseConf = hbaseConf;
this.writeOptions = writeOptions;
this.nullStringLiteral = nullStringLiteral;
}
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
HBaseSinkFunction<RowData> sinkFunction =
new HBaseSinkFunction<>(
tableName,
hbaseConf,
new RowDataToMutationConverter(
hbaseTableSchema,
physicalDataType,
metadataKeys,
nullStringLiteral,
writeOptions.isIgnoreNullValue()),
writeOptions.getBufferFlushMaxSizeInBytes(),
writeOptions.getBufferFlushMaxRows(),
writeOptions.getBufferFlushIntervalMillis());
return SinkFunctionProvider.of(sinkFunction, writeOptions.getParallelism());
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
// UPSERT mode
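// All row kinds except UPDATE_BEFORE are forwarded: HBase writes are keyed by row key, so an
// UPDATE_AFTER already behaves as an upsert and the retraction record is not needed.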
ChangelogMode.Builder builder = ChangelogMode.newBuilder();
for (RowKind kind : requestedMode.getContainedKinds()) {
if (kind != RowKind.UPDATE_BEFORE) {
builder.addContainedKind(kind);
}
}
return builder.build();
}
@Override
public Map<String, DataType> listWritableMetadata() {
return WritableMetadata.list();
}
@Override
public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
this.metadataKeys = metadataKeys;
}
@Override
public DynamicTableSink copy() {
return new HBaseDynamicTableSink(
tableName, physicalDataType, hbaseConf, writeOptions, nullStringLiteral);
}
@Override
public String asSummaryString() {
return "HBase";
}
// -------------------------------------------------------------------------------------------
@VisibleForTesting
public HBaseTableSchema getHBaseTableSchema() {
return this.hbaseTableSchema;
}
@VisibleForTesting
public HBaseWriteOptions getWriteOptions() {
return writeOptions;
}
@VisibleForTesting
public Configuration getConfiguration() {
return this.hbaseConf;
}
@VisibleForTesting
public String getTableName() {
return this.tableName;
}
}
| 3,959 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-e2e-tests/src/test/java/org/apache/flink/streaming
|
Create_ds/flink-connector-hbase/flink-connector-hbase-e2e-tests/src/test/java/org/apache/flink/streaming/tests/HBaseITCase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.testframe.container.FlinkContainers;
import org.apache.flink.connector.testframe.container.TestcontainersSettings;
import org.apache.flink.test.resources.ResourceTestUtils;
import org.apache.flink.test.util.SQLJobSubmission;
import org.apache.flink.util.FileUtils;
import org.apache.commons.io.IOUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import org.testcontainers.containers.Container;
import org.testcontainers.containers.Network;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.testcontainers.shaded.org.hamcrest.MatcherAssert.assertThat;
import static org.testcontainers.shaded.org.hamcrest.Matchers.allOf;
import static org.testcontainers.shaded.org.hamcrest.Matchers.containsInAnyOrder;
import static org.testcontainers.shaded.org.hamcrest.Matchers.containsString;
/** End to end HBase connector tests. */
class HBaseITCase {
private static final String HBASE_E2E_SQL = "hbase_e2e.sql";
private static final Path HADOOP_CP = ResourceTestUtils.getResource(".*hadoop.classpath");
private static final Network NETWORK = Network.newNetwork();
private HBaseContainer hbase;
private FlinkContainers flink;
private List<Path> hadoopCpJars;
private Path connectorJar;
@BeforeEach
void start() throws Exception {
// Prepare all Hadoop jars to mock HADOOP_CLASSPATH, using hadoop.classpath, which contains
// all the Hadoop jars
File hadoopClasspathFile = new File(HADOOP_CP.toAbsolutePath().toString());
if (!hadoopClasspathFile.exists()) {
throw new FileNotFoundException(
"File that contains hadoop classpath " + HADOOP_CP + " does not exist.");
}
String classPathContent = FileUtils.readFileUtf8(hadoopClasspathFile);
hadoopCpJars =
Arrays.stream(classPathContent.split(":"))
.map(Paths::get)
.collect(Collectors.toList());
}
@AfterEach
void stop() {
flink.stop();
hbase.stop();
}
@ParameterizedTest
@CsvSource({"1.4.3,hbase-1.4", "2.2.3,hbase-2.2"})
void test(String hbaseVersion, String connectorVersion) throws Exception {
hbase = new HBaseContainer(hbaseVersion).withNetwork(NETWORK).withNetworkAliases("hbase");
flink =
FlinkContainers.builder()
.withTestcontainersSettings(
TestcontainersSettings.builder()
.network(NETWORK)
.dependsOn(hbase)
.build())
.build();
connectorJar = ResourceTestUtils.getResource("sql-" + connectorVersion + ".jar");
hbase.start();
flink.start();
hbase.createTable("source", "family1", "family2");
hbase.createTable("sink", "family1", "family2");
hbase.putData("source", "row1", "family1", "f1c1", "v1");
hbase.putData("source", "row1", "family2", "f2c1", "v2");
hbase.putData("source", "row1", "family2", "f2c2", "v3");
hbase.putData("source", "row2", "family1", "f1c1", "v4");
hbase.putData("source", "row2", "family2", "f2c1", "v5");
hbase.putData("source", "row2", "family2", "f2c2", "v6");
SQLJobSubmission jobSubmission = initSqlJobSubmission(connectorVersion);
flink.submitSQLJob(jobSubmission);
List<String> valueLines = getSinkResult();
assertEquals(6, valueLines.size());
assertThat(
valueLines,
containsInAnyOrder(
allOf(
containsString("row1"),
containsString("family1"),
containsString("f1c1"),
containsString("value1")),
allOf(
containsString("row1"),
containsString("family2"),
containsString("f2c1"),
containsString("v2")),
allOf(
containsString("row1"),
containsString("family2"),
containsString("f2c2"),
containsString("v3")),
allOf(
containsString("row2"),
containsString("family1"),
containsString("f1c1"),
containsString("value4")),
allOf(
containsString("row2"),
containsString("family2"),
containsString("f2c1"),
containsString("v5")),
allOf(
containsString("row2"),
containsString("family2"),
containsString("f2c2"),
containsString("v6"))));
}
private SQLJobSubmission initSqlJobSubmission(String connectorVersion) throws IOException {
List<String> sqlLines = loadSqlStatements(connectorVersion);
return new SQLJobSubmission.SQLJobSubmissionBuilder(sqlLines)
.addJar(connectorJar)
.addJars(hadoopCpJars)
.build();
}
private List<String> getSinkResult() throws Exception {
Container.ExecResult res = hbase.scanTable("sink");
assertEquals(0, res.getExitCode());
return Arrays.stream(res.getStdout().split("\n"))
.filter(line -> line.contains("value="))
.collect(Collectors.toList());
}
private static List<String> loadSqlStatements(String connectorVersion) throws IOException {
try (InputStream is =
HBaseITCase.class.getClassLoader().getResourceAsStream(HBASE_E2E_SQL)) {
if (is == null) {
throw new FileNotFoundException(HBASE_E2E_SQL);
}
List<String> lines = IOUtils.readLines(is, StandardCharsets.UTF_8);
return lines.stream()
.map(line -> line.replace("$HBASE_CONNECTOR", connectorVersion))
.collect(Collectors.toList());
}
}
}
| 3,960 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-e2e-tests/src/main/java/org/apache/flink/streaming
|
Create_ds/flink-connector-hbase/flink-connector-hbase-e2e-tests/src/main/java/org/apache/flink/streaming/tests/HBaseContainer.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import com.github.dockerjava.api.command.InspectContainerResponse;
import org.testcontainers.containers.Container;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.images.builder.ImageFromDockerfile;
import java.util.Arrays;
import java.util.stream.Collectors;
/** Standalone containerized HBase instance that builds the image on the fly. */
public class HBaseContainer extends GenericContainer<HBaseContainer> {
private static final String HBASE_BIN = "/opt/hbase/bin";
private static final int MAX_RETRIES = 3;
public HBaseContainer(String hbaseVersion) {
super(getImageFromDockerfile(hbaseVersion));
}
private static ImageFromDockerfile getImageFromDockerfile(String hbaseVersion) {
return new ImageFromDockerfile()
.withDockerfileFromBuilder(
builder ->
builder.from("adoptopenjdk/openjdk8")
.env("HBASE_VERSION", hbaseVersion)
.run(
"export INITRD=no"
+ " && export HBASE_DIST=\"http://archive.apache.org/dist/hbase\""
+ " && apt-get update -y"
+ " && apt-get install -y --no-install-recommends curl"
+ " && cd /opt"
+ " && curl -SL $HBASE_DIST/$HBASE_VERSION/hbase-$HBASE_VERSION-bin.tar.gz"
+ " | tar -x -z && mv hbase-${HBASE_VERSION} hbase")
.expose(2181)
.cmd(
"/bin/sh",
"-c",
String.format(
"nohup %s/start-hbase.sh & sleep infinity",
HBASE_BIN)));
}
@Override
protected void containerIsStarted(InspectContainerResponse containerInfo) {
ExecResult res = null;
for (int i = 0; i < MAX_RETRIES; i++) {
try {
res = execCmd("scan 'hbase:meta'");
if (res.getStdout().contains("hbase:namespace")) {
return;
}
Thread.sleep(5000L);
} catch (Exception e) {
throw new RuntimeException("Failed to verify if container is started.", e);
}
}
throw new IllegalStateException("Failed to start HBase properly:\n" + res);
}
public Container.ExecResult createTable(String table, String... colFamilies) throws Exception {
String createCmd =
String.format("create '%s',", table)
+ Arrays.stream(colFamilies)
.map(cf -> String.format("{NAME=>'%s'}", cf))
.collect(Collectors.joining(","));
return execCmd(createCmd);
}
public Container.ExecResult putData(
String table, String rowKey, String colFamily, String colQualifier, String val)
throws Exception {
String putCmd =
String.format(
"put '%s','%s','%s:%s','%s'", table, rowKey, colFamily, colQualifier, val);
return execCmd(putCmd);
}
public Container.ExecResult scanTable(String table) throws Exception {
String scanCmd = String.format("scan '%s'", table);
return execCmd(scanCmd);
}
private Container.ExecResult execCmd(String cmd) throws Exception {
String hbaseShellCmd = String.format("echo \"%s\" | %s/hbase shell", cmd, HBASE_BIN);
return execInContainer("sh", "-c", hbaseShellCmd);
}
}
| 3,961 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/eclipse/jetty
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/eclipse/jetty/util/JavaVersion.java
|
//
// ========================================================================
// Copyright (c) 1995-2018 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//
package org.eclipse.jetty.util;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Java Version Utility class.
*
* <p>Parses Java versions to extract a consistent set of version parts.
*/
public class JavaVersion {
/**
* Context attribute that can be set to target a different version of the jvm than the current
* runtime. Acceptable values should correspond to those returned by JavaVersion.getPlatform().
*/
public static final String JAVA_TARGET_PLATFORM = "org.eclipse.jetty.javaTargetPlatform";
/** Regex for Java version numbers. */
private static final String VSTR_FORMAT = "(?<VNUM>[1-9][0-9]*(?:(?:\\.0)*\\.[0-9]+)*).*";
static final Pattern VSTR_PATTERN = Pattern.compile(VSTR_FORMAT);
public static final JavaVersion VERSION =
parse(
System.getProperty(
"java.runtime.version", System.getProperty("java.version", "1.8")));
public static JavaVersion parse(String v) {
Matcher m = VSTR_PATTERN.matcher(v);
if (!m.matches() || m.group("VNUM") == null) {
System.err.println("ERROR: Invalid version string: '" + v + "'");
return new JavaVersion(v + "-UNKNOWN", 8, 1, 8, 0);
}
// $VNUM is a dot-separated list of integers of arbitrary length
String[] split = m.group("VNUM").split("\\.");
int[] version = new int[split.length];
for (int i = 0; i < split.length; i++) {
version[i] = Integer.parseInt(split[i]);
}
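// Platform number: for legacy "1.x" version strings the second element is the platform
// (1.8 -> 8); for 9 and above, or a single-element version string, the first element is used.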
return new JavaVersion(
v,
(version[0] >= 9 || version.length == 1) ? version[0] : version[1],
version[0],
version.length > 1 ? version[1] : 0,
version.length > 2 ? version[2] : 0);
}
private final String version;
private final int platform;
private final int major;
private final int minor;
private final int micro;
private JavaVersion(String version, int platform, int major, int minor, int micro) {
this.version = version;
this.platform = platform;
this.major = major;
this.minor = minor;
this.micro = micro;
}
/** @return the string from which this JavaVersion was created */
public String getVersion() {
return version;
}
/**
* Returns the Java Platform version, such as {@code 8} for JDK 1.8.0_92 and {@code 9} for JDK
* 9.2.4.
*
* @return the Java Platform version
*/
public int getPlatform() {
return platform;
}
/**
* Returns the major number version, such as {@code 1} for JDK 1.8.0_92 and {@code 9} for JDK
* 9.2.4.
*
* @return the major number version
*/
public int getMajor() {
return major;
}
/**
* Returns the minor number version, such as {@code 8} for JDK 1.8.0_92 and {@code 2} for JDK
* 9.2.4.
*
* @return the minor number version
*/
public int getMinor() {
return minor;
}
/**
* Returns the micro number version (aka security number), such as {@code 0} for JDK 1.8.0_92
* and {@code 4} for JDK 9.2.4.
*
* @return the micro number version
*/
public int getMicro() {
return micro;
}
/**
* Returns the update number version, such as {@code 92} for JDK 1.8.0_92 and {@code 0} for JDK
* 9.2.4.
*
* @return the update number version
*/
@Deprecated
public int getUpdate() {
return 0;
}
/**
* Returns the remaining string after the version numbers, such as {@code -internal} for JDK
* 1.8.0_92-internal and {@code -ea} for JDK 9-ea, or {@code +13} for JDK 9.2.4+13.
*
* @return the remaining string after the version numbers
*/
@Deprecated
public String getSuffix() {
return null;
}
@Override
public String toString() {
return version;
}
}
| 3,962 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/slf4j
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.slf4j.impl;
/** Fake logger adapter stub to work around HBase referring to this class directly. */
public interface Log4jLoggerAdapter {}
| 3,963 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.architecture;
import org.apache.flink.architecture.common.ImportOptions;
import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;
/** Architecture tests for test code. */
@AnalyzeClasses(
packages = "org.apache.flink.connector.hbase2",
importOptions = {
ImportOption.OnlyIncludeTests.class,
ImportOptions.ExcludeScalaImportOption.class,
ImportOptions.ExcludeShadedImportOption.class
})
public class TestCodeArchitectureTest {
@ArchTest
public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
}
| 3,964 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDynamicTableFactoryTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.source.HBaseRowDataLookupFunction;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink;
import org.apache.flink.connector.hbase2.source.HBaseDynamicTableSource;
import org.apache.flink.connector.hbase2.source.HBaseRowDataAsyncLookupFunction;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.connector.source.lookup.AsyncLookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.LookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.functions.AsyncLookupFunction;
import org.apache.flink.table.functions.LookupFunction;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;
import org.apache.flink.table.runtime.connector.source.LookupRuntimeProviderContext;
import org.apache.flink.table.types.DataType;
import org.apache.flink.util.ExceptionUtils;
import org.apache.commons.collections.IteratorUtils;
import org.apache.hadoop.hbase.HConstants;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import static org.apache.flink.table.api.DataTypes.BIGINT;
import static org.apache.flink.table.api.DataTypes.BOOLEAN;
import static org.apache.flink.table.api.DataTypes.DATE;
import static org.apache.flink.table.api.DataTypes.DECIMAL;
import static org.apache.flink.table.api.DataTypes.DOUBLE;
import static org.apache.flink.table.api.DataTypes.FIELD;
import static org.apache.flink.table.api.DataTypes.INT;
import static org.apache.flink.table.api.DataTypes.ROW;
import static org.apache.flink.table.api.DataTypes.STRING;
import static org.apache.flink.table.api.DataTypes.TIME;
import static org.apache.flink.table.api.DataTypes.TIMESTAMP;
import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink;
import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSource;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/** Unit test for {@link HBase2DynamicTableFactory}. */
public class HBaseDynamicTableFactoryTest {
private static final String FAMILY1 = "f1";
private static final String FAMILY2 = "f2";
private static final String FAMILY3 = "f3";
private static final String FAMILY4 = "f4";
private static final String COL1 = "c1";
private static final String COL2 = "c2";
private static final String COL3 = "c3";
private static final String COL4 = "c4";
private static final String ROWKEY = "rowkey";
@Rule public final ExpectedException thrown = ExpectedException.none();
@SuppressWarnings("rawtypes")
@Test
public void testTableSourceFactory() {
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(FAMILY1, ROW(FIELD(COL1, INT()))),
Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL2, BIGINT()))),
Column.physical(ROWKEY, BIGINT()),
Column.physical(
FAMILY3,
ROW(
FIELD(COL1, DOUBLE()),
FIELD(COL2, BOOLEAN()),
FIELD(COL3, STRING()))),
Column.physical(
FAMILY4,
ROW(
FIELD(COL1, DECIMAL(10, 3)),
FIELD(COL2, TIMESTAMP(3)),
FIELD(COL3, DATE()),
FIELD(COL4, TIME()))));
DynamicTableSource source = createTableSource(schema, getAllOptions());
assertTrue(source instanceof HBaseDynamicTableSource);
HBaseDynamicTableSource hbaseSource = (HBaseDynamicTableSource) source;
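// Lookup key index 2 points at the 'rowkey' column in the resolved schema declared above.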
int[][] lookupKey = {{2}};
LookupTableSource.LookupRuntimeProvider lookupProvider =
hbaseSource.getLookupRuntimeProvider(new LookupRuntimeProviderContext(lookupKey));
assertTrue(lookupProvider instanceof LookupFunctionProvider);
LookupFunction tableFunction =
((LookupFunctionProvider) lookupProvider).createLookupFunction();
assertTrue(tableFunction instanceof HBaseRowDataLookupFunction);
assertEquals(
"testHBastTable", ((HBaseRowDataLookupFunction) tableFunction).getHTableName());
HBaseTableSchema hbaseSchema = hbaseSource.getHBaseTableSchema();
assertEquals(2, hbaseSchema.getRowKeyIndex());
assertEquals(Optional.of(Types.LONG), hbaseSchema.getRowKeyTypeInfo());
assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, hbaseSchema.getFamilyNames());
assertArrayEquals(new String[] {"c1"}, hbaseSchema.getQualifierNames("f1"));
assertArrayEquals(new String[] {"c1", "c2"}, hbaseSchema.getQualifierNames("f2"));
assertArrayEquals(new String[] {"c1", "c2", "c3"}, hbaseSchema.getQualifierNames("f3"));
assertArrayEquals(
new String[] {"c1", "c2", "c3", "c4"}, hbaseSchema.getQualifierNames("f4"));
assertArrayEquals(new DataType[] {INT()}, hbaseSchema.getQualifierDataTypes("f1"));
assertArrayEquals(
new DataType[] {INT(), BIGINT()}, hbaseSchema.getQualifierDataTypes("f2"));
assertArrayEquals(
new DataType[] {DOUBLE(), BOOLEAN(), STRING()},
hbaseSchema.getQualifierDataTypes("f3"));
assertArrayEquals(
new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
hbaseSchema.getQualifierDataTypes("f4"));
}
@Test
public void testLookupOptions() {
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
Map<String, String> options = getAllOptions();
options.put("lookup.cache", "PARTIAL");
options.put("lookup.partial-cache.expire-after-access", "15213s");
options.put("lookup.partial-cache.expire-after-write", "18213s");
options.put("lookup.partial-cache.max-rows", "10000");
options.put("lookup.partial-cache.cache-missing-key", "false");
options.put("lookup.max-retries", "15513");
DynamicTableSource source = createTableSource(schema, options);
HBaseDynamicTableSource hbaseSource = (HBaseDynamicTableSource) source;
assertThat(((HBaseDynamicTableSource) source).getMaxRetryTimes()).isEqualTo(15513);
assertThat(hbaseSource.getCache()).isInstanceOf(DefaultLookupCache.class);
DefaultLookupCache cache = (DefaultLookupCache) hbaseSource.getCache();
assertThat(cache)
.isEqualTo(
DefaultLookupCache.newBuilder()
.expireAfterAccess(Duration.ofSeconds(15213))
.expireAfterWrite(Duration.ofSeconds(18213))
.maximumSize(10000)
.cacheMissingKey(false)
.build());
}
@Test
public void testTableSinkFactory() {
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))),
Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL3, BIGINT()))),
Column.physical(
FAMILY3, ROW(FIELD(COL2, BOOLEAN()), FIELD(COL3, STRING()))),
Column.physical(
FAMILY4,
ROW(
FIELD(COL1, DECIMAL(10, 3)),
FIELD(COL2, TIMESTAMP(3)),
FIELD(COL3, DATE()),
FIELD(COL4, TIME()))));
DynamicTableSink sink = createTableSink(schema, getAllOptions());
assertTrue(sink instanceof HBaseDynamicTableSink);
HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;
HBaseTableSchema hbaseSchema = hbaseSink.getHBaseTableSchema();
assertEquals(0, hbaseSchema.getRowKeyIndex());
assertEquals(Optional.of(STRING()), hbaseSchema.getRowKeyDataType());
assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, hbaseSchema.getFamilyNames());
assertArrayEquals(new String[] {"c1", "c2"}, hbaseSchema.getQualifierNames("f1"));
assertArrayEquals(new String[] {"c1", "c3"}, hbaseSchema.getQualifierNames("f2"));
assertArrayEquals(new String[] {"c2", "c3"}, hbaseSchema.getQualifierNames("f3"));
assertArrayEquals(
new String[] {"c1", "c2", "c3", "c4"}, hbaseSchema.getQualifierNames("f4"));
assertArrayEquals(
new DataType[] {DOUBLE(), INT()}, hbaseSchema.getQualifierDataTypes("f1"));
assertArrayEquals(
new DataType[] {INT(), BIGINT()}, hbaseSchema.getQualifierDataTypes("f2"));
assertArrayEquals(
new DataType[] {BOOLEAN(), STRING()}, hbaseSchema.getQualifierDataTypes("f3"));
assertArrayEquals(
new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
hbaseSchema.getQualifierDataTypes("f4"));
// verify hadoop Configuration
org.apache.hadoop.conf.Configuration expectedConfiguration =
HBaseConfigurationUtil.getHBaseConfiguration();
expectedConfiguration.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
expectedConfiguration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/flink");
expectedConfiguration.set("hbase.security.authentication", "kerberos");
org.apache.hadoop.conf.Configuration actualConfiguration = hbaseSink.getConfiguration();
assertEquals(
IteratorUtils.toList(expectedConfiguration.iterator()),
IteratorUtils.toList(actualConfiguration.iterator()));
// verify tableName
assertEquals("testHBastTable", hbaseSink.getTableName());
HBaseWriteOptions expectedWriteOptions =
HBaseWriteOptions.builder()
.setBufferFlushMaxRows(1000)
.setBufferFlushIntervalMillis(1000)
.setBufferFlushMaxSizeInBytes(2 * 1024 * 1024)
.build();
HBaseWriteOptions actualWriteOptions = hbaseSink.getWriteOptions();
assertEquals(expectedWriteOptions, actualWriteOptions);
}
@Test
public void testBufferFlushOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.buffer-flush.max-size", "10mb");
options.put("sink.buffer-flush.max-rows", "100");
options.put("sink.buffer-flush.interval", "10s");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
HBaseWriteOptions expected =
HBaseWriteOptions.builder()
.setBufferFlushMaxRows(100)
.setBufferFlushIntervalMillis(10 * 1000)
.setBufferFlushMaxSizeInBytes(10 * 1024 * 1024)
.build();
HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
assertEquals(expected, actual);
}
@Test
public void testSinkIgnoreNullValueOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.ignore-null-value", "true");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
assertThat(actual.isIgnoreNullValue()).isTrue();
}
@Test
public void testParallelismOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.parallelism", "2");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
assertTrue(sink instanceof HBaseDynamicTableSink);
HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;
SinkFunctionProvider provider =
(SinkFunctionProvider)
hbaseSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
assertEquals(2, (long) provider.getParallelism().get());
}
@Test
public void testLookupAsync() {
Map<String, String> options = getAllOptions();
options.put("lookup.async", "true");
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))));
DynamicTableSource source = createTableSource(schema, options);
assertTrue(source instanceof HBaseDynamicTableSource);
HBaseDynamicTableSource hbaseSource = (HBaseDynamicTableSource) source;
int[][] lookupKey = {{0}};
LookupTableSource.LookupRuntimeProvider lookupProvider =
hbaseSource.getLookupRuntimeProvider(new LookupRuntimeProviderContext(lookupKey));
assertTrue(lookupProvider instanceof AsyncLookupFunctionProvider);
AsyncLookupFunction asyncTableFunction =
((AsyncLookupFunctionProvider) lookupProvider).createAsyncLookupFunction();
assertTrue(asyncTableFunction instanceof HBaseRowDataAsyncLookupFunction);
assertEquals(
"testHBastTable",
((HBaseRowDataAsyncLookupFunction) asyncTableFunction).getHTableName());
}
@Test
public void testDisabledBufferFlushOptions() {
Map<String, String> options = getAllOptions();
options.put("sink.buffer-flush.max-size", "0");
options.put("sink.buffer-flush.max-rows", "0");
options.put("sink.buffer-flush.interval", "0");
ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
DynamicTableSink sink = createTableSink(schema, options);
HBaseWriteOptions expected =
HBaseWriteOptions.builder()
.setBufferFlushMaxRows(0)
.setBufferFlushIntervalMillis(0)
.setBufferFlushMaxSizeInBytes(0)
.build();
HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
assertEquals(expected, actual);
}
@Test
public void testUnknownOption() {
Map<String, String> options = getAllOptions();
options.put("sink.unknown.key", "unknown-value");
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))));
try {
createTableSource(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e, "Unsupported options:\n\nsink.unknown.key")
.isPresent());
}
try {
createTableSink(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e, "Unsupported options:\n\nsink.unknown.key")
.isPresent());
}
}
@Test
public void testTypeWithUnsupportedPrecision() {
Map<String, String> options = getAllOptions();
// test unsupported timestamp precision
ResolvedSchema schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(
FAMILY1, ROW(FIELD(COL1, TIMESTAMP(6)), FIELD(COL2, INT()))));
try {
createTableSource(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIMESTAMP type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
try {
createTableSink(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIMESTAMP type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
// test unsupported time precision
schema =
ResolvedSchema.of(
Column.physical(ROWKEY, STRING()),
Column.physical(FAMILY1, ROW(FIELD(COL1, TIME(6)), FIELD(COL2, INT()))));
try {
createTableSource(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIME type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
try {
createTableSink(schema, options);
fail("Should fail");
} catch (Exception e) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
e,
"The precision 6 of TIME type is out of the range [0, 3]"
+ " supported by HBase connector")
.isPresent());
}
}
private Map<String, String> getAllOptions() {
Map<String, String> options = new HashMap<>();
options.put("connector", "hbase-2.2");
options.put("table-name", "testHBastTable");
options.put("zookeeper.quorum", "localhost:2181");
options.put("zookeeper.znode.parent", "/flink");
options.put("properties.hbase.security.authentication", "kerberos");
return options;
}
}
| 3,965 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTablePlanTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.planner.utils.StreamTableTestUtil;
import org.apache.flink.table.planner.utils.TableTestBase;
import org.junit.Test;
import static org.apache.flink.core.testutils.FlinkMatchers.containsCause;
/** Plan tests for HBase connector, for example, testing projection push down. */
public class HBaseTablePlanTest extends TableTestBase {
private final StreamTableTestUtil util = streamTestUtil(TableConfig.getDefault());
@Test
public void testMultipleRowKey() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " rowkey INT,"
+ " rowkey2 STRING "
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"Row key can't be set multiple times.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testNoneRowKey() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"HBase table requires to define a row key field. "
+ "A row key field is defined as an atomic type, "
+ "column families and qualifiers are defined as ROW type.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testInvalidPrimaryKey() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " rowkey STRING, "
+ " PRIMARY KEY (family1) NOT ENFORCED "
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"Primary key of HBase table must be defined on the row key field. "
+ "A row key field is defined as an atomic type, "
+ "column families and qualifiers are defined as ROW type.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testUnsupportedDataType() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " col1 ARRAY<STRING>, "
+ " rowkey STRING, "
+ " PRIMARY KEY (rowkey) NOT ENFORCED "
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
thrown().expect(
containsCause(
new IllegalArgumentException(
"Unsupported field type 'ARRAY<STRING>' for HBase.")));
util.verifyExecPlan("SELECT * FROM hTable");
}
@Test
public void testProjectionPushDown() {
util.tableEnv()
.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = 'my_table',"
+ " 'zookeeper.quorum' = 'localhost:2021'"
+ ")");
util.verifyExecPlan("SELECT h.family3, h.family2.col2 FROM hTable AS h");
}
}
| 3,966 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseConnectorITCase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
import org.apache.flink.connector.hbase.sink.RowDataToMutationConverter;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase2.source.AbstractTableInputFormat;
import org.apache.flink.connector.hbase2.source.HBaseRowDataInputFormat;
import org.apache.flink.connector.hbase2.util.HBaseTestBase;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.table.planner.factories.TestValuesTableFactory;
import org.apache.flink.test.util.MiniClusterWithClientResource;
import org.apache.flink.test.util.TestBaseUtils;
import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.CollectionUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import static org.apache.flink.table.api.Expressions.$;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
/** IT cases for HBase connector (including source and sink). */
public class HBaseConnectorITCase extends HBaseTestBase {
@ClassRule
public static final MiniClusterWithClientResource MINI_CLUSTER =
new MiniClusterWithClientResource(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(new Configuration())
.build());
// -------------------------------------------------------------------------------------
// HBaseTableSource tests
// -------------------------------------------------------------------------------------
@Test
public void testTableSourceFullScan() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table =
tEnv.sqlQuery(
"SELECT "
+ " h.family1.col1, "
+ " h.family2.col1, "
+ " h.family2.col2, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3 "
+ "FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[10, Hello-1, 100, 1.01, false, Welt-1]\n"
+ "+I[20, Hello-2, 200, 2.02, true, Welt-2]\n"
+ "+I[30, Hello-3, 300, 3.03, false, Welt-3]\n"
+ "+I[40, null, 400, 4.04, true, Welt-4]\n"
+ "+I[50, Hello-5, 500, 5.05, false, Welt-5]\n"
+ "+I[60, Hello-6, 600, 6.06, true, Welt-6]\n"
+ "+I[70, Hello-7, 700, 7.07, false, Welt-7]\n"
+ "+I[80, null, 800, 8.08, true, Welt-8]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceEmptyTableScan() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_EMPTY_TABLE
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table = tEnv.sqlQuery("SELECT rowkey, h.family1.col1 FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
assertThat(results).isEmpty();
}
@Test
public void testTableSourceProjection() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table =
tEnv.sqlQuery(
"SELECT "
+ " h.family1.col1, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3 "
+ "FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[10, 1.01, false, Welt-1]\n"
+ "+I[20, 2.02, true, Welt-2]\n"
+ "+I[30, 3.03, false, Welt-3]\n"
+ "+I[40, 4.04, true, Welt-4]\n"
+ "+I[50, 5.05, false, Welt-5]\n"
+ "+I[60, 6.06, true, Welt-6]\n"
+ "+I[70, 7.07, false, Welt-7]\n"
+ "+I[80, 8.08, true, Welt-8]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceFieldOrder() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " rowkey INT PRIMARY KEY NOT ENFORCED,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " family1 ROW<col1 INT>"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
Table table = tEnv.sqlQuery("SELECT * FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[1, +I[Hello-1, 100], +I[1.01, false, Welt-1], +I[10]]\n"
+ "+I[2, +I[Hello-2, 200], +I[2.02, true, Welt-2], +I[20]]\n"
+ "+I[3, +I[Hello-3, 300], +I[3.03, false, Welt-3], +I[30]]\n"
+ "+I[4, +I[null, 400], +I[4.04, true, Welt-4], +I[40]]\n"
+ "+I[5, +I[Hello-5, 500], +I[5.05, false, Welt-5], +I[50]]\n"
+ "+I[6, +I[Hello-6, 600], +I[6.06, true, Welt-6], +I[60]]\n"
+ "+I[7, +I[Hello-7, 700], +I[7.07, false, Welt-7], +I[70]]\n"
+ "+I[8, +I[null, 800], +I[8.08, true, Welt-8], +I[80]]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceReadAsByteArray() {
TableEnvironment tEnv = TableEnvironment.create(batchSettings);
tEnv.executeSql(
"CREATE TABLE hTable ("
+ " family2 ROW<col1 BYTES, col2 BYTES>,"
+ " rowkey INT"
+ // no primary key syntax
") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
tEnv.registerFunction("toUTF8", new ToUTF8());
tEnv.registerFunction("toLong", new ToLong());
Table table =
tEnv.sqlQuery(
"SELECT "
+ " toUTF8(h.family2.col1), "
+ " toLong(h.family2.col2) "
+ "FROM hTable AS h");
List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
String expected =
"+I[Hello-1, 100]\n"
+ "+I[Hello-2, 200]\n"
+ "+I[Hello-3, 300]\n"
+ "+I[null, 400]\n"
+ "+I[Hello-5, 500]\n"
+ "+I[Hello-6, 600]\n"
+ "+I[Hello-7, 700]\n"
+ "+I[null, 800]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSink() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
// register HBase table testTable1 which contains test data
String table1DDL = createHBaseTableDDL(TEST_TABLE_1, false);
tEnv.executeSql(table1DDL);
String table2DDL = createHBaseTableDDL(TEST_TABLE_2, false);
tEnv.executeSql(table2DDL);
String query =
"INSERT INTO "
+ TEST_TABLE_2
+ " SELECT"
+ " rowkey,"
+ " family1,"
+ " family2,"
+ " family3"
+ " FROM "
+ TEST_TABLE_1;
TableResult tableResult = tEnv.executeSql(query);
// wait to finish
tableResult.await();
assertEquals(
"Expected INSERT rowKind", RowKind.INSERT, tableResult.collect().next().getKind());
// start a batch scan job to verify contents in HBase table
TableEnvironment batchEnv = TableEnvironment.create(batchSettings);
batchEnv.executeSql(table2DDL);
List<String> expected = new ArrayList<>();
expected.add("+I[1, 10, Hello-1, 100, 1.01, false, Welt-1]\n");
expected.add("+I[2, 20, Hello-2, 200, 2.02, true, Welt-2]\n");
expected.add("+I[3, 30, Hello-3, 300, 3.03, false, Welt-3]\n");
expected.add("+I[4, 40, null, 400, 4.04, true, Welt-4]\n");
expected.add("+I[5, 50, Hello-5, 500, 5.05, false, Welt-5]\n");
expected.add("+I[6, 60, Hello-6, 600, 6.06, true, Welt-6]\n");
expected.add("+I[7, 70, Hello-7, 700, 7.07, false, Welt-7]\n");
expected.add("+I[8, 80, null, 800, 8.08, true, Welt-8]\n");
Table countTable =
batchEnv.sqlQuery("SELECT COUNT(h.rowkey) FROM " + TEST_TABLE_2 + " AS h");
assertEquals(new Long(expected.size()), countTable.execute().collect().next().getField(0));
Table table =
batchEnv.sqlQuery(
"SELECT "
+ " h.rowkey, "
+ " h.family1.col1, "
+ " h.family2.col1, "
+ " h.family2.col2, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3 "
+ "FROM "
+ TEST_TABLE_2
+ " AS h");
TableResult tableResult2 = table.execute();
List<Row> results = CollectionUtil.iteratorToList(tableResult2.collect());
TestBaseUtils.compareResultAsText(results, String.join("", expected));
}
@Test
public void testTableSinkWithChangelog() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
// register values table for source
String dataId =
TestValuesTableFactory.registerData(
Arrays.asList(
Row.ofKind(RowKind.INSERT, 1, Row.of("Hello1")),
Row.ofKind(RowKind.DELETE, 1, Row.of("Hello2")),
Row.ofKind(RowKind.INSERT, 2, Row.of("Hello1")),
Row.ofKind(RowKind.INSERT, 2, Row.of("Hello2")),
Row.ofKind(RowKind.INSERT, 2, Row.of("Hello3")),
Row.ofKind(RowKind.DELETE, 2, Row.of("Hello3")),
Row.ofKind(RowKind.INSERT, 1, Row.of("Hello3"))));
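// Only row key 1 should survive with 'Hello3': the trailing DELETE removes row key 2,
// while the DELETE on row key 1 is followed by another INSERT.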
tEnv.executeSql(
"CREATE TABLE source_table ("
+ " rowkey INT,"
+ " family1 ROW<name STRING>,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'values',"
+ " 'data-id' = '"
+ dataId
+ "',"
+ " 'changelog-mode'='I,UA,UB,D'"
+ ")");
// register HBase table for sink
tEnv.executeSql(
"CREATE TABLE sink_table ("
+ " rowkey INT,"
+ " family1 ROW<name STRING>,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_4
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
tEnv.executeSql("INSERT INTO sink_table SELECT * FROM source_table").await();
TableResult result = tEnv.executeSql("SELECT * FROM sink_table");
List<Row> actual = CollectionUtil.iteratorToList(result.collect());
assertThat(actual).isEqualTo(Collections.singletonList(Row.of(1, Row.of("Hello3"))));
}
@Test
public void testTableSinkWithTimestampMetadata() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
tEnv.executeSql(
"CREATE TABLE hTableForSink ("
+ " rowkey INT PRIMARY KEY NOT ENFORCED,"
+ " family1 ROW<col1 INT>,"
+ " version TIMESTAMP_LTZ(3) NOT NULL METADATA FROM 'timestamp'"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_5
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
String insert =
"INSERT INTO hTableForSink VALUES"
+ "(1, ROW(1), TO_TIMESTAMP_LTZ(1696767943270, 3)),"
+ "(2, ROW(2), TO_TIMESTAMP_LTZ(1696767943270, 3)),"
+ "(3, ROW(3), TO_TIMESTAMP_LTZ(1696767943270, 3)),"
+ "(1, ROW(10), TO_TIMESTAMP_LTZ(1696767943269, 3)),"
+ "(2, ROW(20), TO_TIMESTAMP_LTZ(1696767943271, 3))";
tEnv.executeSql(insert).await();
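// HBase keeps the cell with the newest timestamp per row key, so the read-back below
// expects 1 -> 1, 2 -> 20 and 3 -> 3.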
tEnv.executeSql(
"CREATE TABLE hTableForQuery ("
+ " rowkey INT PRIMARY KEY NOT ENFORCED,"
+ " family1 ROW<col1 INT>"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'table-name' = '"
+ TEST_TABLE_5
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
TableResult result = tEnv.executeSql("SELECT rowkey, family1.col1 FROM hTableForQuery");
List<Row> results = CollectionUtil.iteratorToList(result.collect());
String expected = "+I[1, 1]\n+I[2, 20]\n+I[3, 3]\n";
TestBaseUtils.compareResultAsText(results, expected);
}
@Test
public void testTableSourceSinkWithDDL() throws Exception {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
// register HBase table testTable1 which contains test data
String table1DDL = createHBaseTableDDL(TEST_TABLE_1, true);
tEnv.executeSql(table1DDL);
// register HBase table which is empty
String table3DDL = createHBaseTableDDL(TEST_TABLE_3, true);
tEnv.executeSql(table3DDL);
String insertStatement =
"INSERT INTO "
+ TEST_TABLE_3
+ " SELECT rowkey,"
+ " family1,"
+ " family2,"
+ " family3,"
+ " family4"
+ " from "
+ TEST_TABLE_1;
TableResult tableResult = tEnv.executeSql(insertStatement);
// wait to finish
tableResult.await();
assertEquals(
"Expected INSERT rowKind", RowKind.INSERT, tableResult.collect().next().getKind());
// start a batch scan job to verify contents in HBase table
TableEnvironment batchEnv = TableEnvironment.create(batchSettings);
batchEnv.executeSql(table3DDL);
List<String> expected = new ArrayList<>();
expected.add(
"+I[1, 10, Hello-1, 100, 1.01, false, Welt-1, 2019-08-18T19:00, 2019-08-18, 19:00, 12345678.0001]");
expected.add(
"+I[2, 20, Hello-2, 200, 2.02, true, Welt-2, 2019-08-18T19:01, 2019-08-18, 19:01, 12345678.0002]");
expected.add(
"+I[3, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
expected.add(
"+I[4, 40, null, 400, 4.04, true, Welt-4, 2019-08-18T19:03, 2019-08-18, 19:03, 12345678.0004]");
expected.add(
"+I[5, 50, Hello-5, 500, 5.05, false, Welt-5, 2019-08-19T19:10, 2019-08-19, 19:10, 12345678.0005]");
expected.add(
"+I[6, 60, Hello-6, 600, 6.06, true, Welt-6, 2019-08-19T19:20, 2019-08-19, 19:20, 12345678.0006]");
expected.add(
"+I[7, 70, Hello-7, 700, 7.07, false, Welt-7, 2019-08-19T19:30, 2019-08-19, 19:30, 12345678.0007]");
expected.add(
"+I[8, 80, null, 800, 8.08, true, Welt-8, 2019-08-19T19:40, 2019-08-19, 19:40, 12345678.0008]");
String query =
"SELECT "
+ " h.rowkey, "
+ " h.family1.col1, "
+ " h.family2.col1, "
+ " h.family2.col2, "
+ " h.family3.col1, "
+ " h.family3.col2, "
+ " h.family3.col3, "
+ " h.family4.col1, "
+ " h.family4.col2, "
+ " h.family4.col3, "
+ " h.family4.col4 "
+ " FROM "
+ TEST_TABLE_3
+ " AS h";
TableResult tableResult3 = batchEnv.executeSql(query);
List<String> result =
StreamSupport.stream(
Spliterators.spliteratorUnknownSize(
tableResult3.collect(), Spliterator.ORDERED),
false)
.map(Row::toString)
.sorted()
.collect(Collectors.toList());
assertEquals(expected, result);
}
@Test
public void testHBaseLookupTableSource() {
verifyHBaseLookupJoin(false);
}
@Test
public void testHBaseAsyncLookupTableSource() {
verifyHBaseLookupJoin(true);
}
@Test
public void testTableInputFormatOpenClose() throws IOException {
HBaseTableSchema tableSchema = new HBaseTableSchema();
tableSchema.addColumn(FAMILY1, F1COL1, byte[].class);
AbstractTableInputFormat<?> inputFormat =
new HBaseRowDataInputFormat(getConf(), TEST_TABLE_1, tableSchema, "null");
inputFormat.open(inputFormat.createInputSplits(1)[0]);
assertNotNull(inputFormat.getConnection());
assertNotNull(inputFormat.getConnection().getTable(TableName.valueOf(TEST_TABLE_1)));
inputFormat.close();
assertNull(inputFormat.getConnection());
}
@Test
public void testTableInputFormatTableExistence() throws IOException {
HBaseTableSchema tableSchema = new HBaseTableSchema();
tableSchema.addColumn(FAMILY1, F1COL1, byte[].class);
AbstractTableInputFormat<?> inputFormat =
new HBaseRowDataInputFormat(getConf(), TEST_NOT_EXISTS_TABLE, tableSchema, "null");
assertThatThrownBy(() -> inputFormat.createInputSplits(1))
.isExactlyInstanceOf(TableNotFoundException.class);
inputFormat.close();
assertNull(inputFormat.getConnection());
}
@Test
public void testHBaseSinkFunctionTableExistence() throws Exception {
org.apache.hadoop.conf.Configuration hbaseConf =
HBaseConfigurationUtil.getHBaseConfiguration();
hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, getZookeeperQuorum());
hbaseConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");
HBaseTableSchema tableSchema = new HBaseTableSchema();
tableSchema.addColumn(FAMILY1, F1COL1, byte[].class);
HBaseSinkFunction<RowData> sinkFunction =
new HBaseSinkFunction<>(
TEST_NOT_EXISTS_TABLE,
hbaseConf,
new RowDataToMutationConverter(
tableSchema,
tableSchema.convertToDataType(),
Collections.emptyList(),
"null",
false),
2 * 1024 * 1024,
1000,
1000);
assertThatThrownBy(() -> sinkFunction.open(new Configuration()))
.getRootCause()
.isExactlyInstanceOf(TableNotFoundException.class);
sinkFunction.close();
}
private void verifyHBaseLookupJoin(boolean async) {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
tEnv.executeSql(
"CREATE TABLE "
+ TEST_TABLE_1
+ " ("
+ " family1 ROW<col1 INT>,"
+ " family2 ROW<col1 STRING, col2 BIGINT>,"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
+ " rowkey INT,"
+ " family4 ROW<col1 TIMESTAMP(3), col2 DATE, col3 TIME(3), col4 DECIMAL(12, 4)>,"
+ " PRIMARY KEY (rowkey) NOT ENFORCED"
+ ") WITH ("
+ " 'connector' = 'hbase-2.2',"
+ " 'lookup.async' = '"
+ async
+ "',"
+ " 'table-name' = '"
+ TEST_TABLE_1
+ "',"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "'"
+ ")");
// prepare a source table
String srcTableName = "src";
DataStream<Row> srcDs = execEnv.fromCollection(testData).returns(testTypeInfo);
Table in = tEnv.fromDataStream(srcDs, $("a"), $("b"), $("c"), $("proc").proctime());
tEnv.createTemporaryView(srcTableName, in);
// perform a temporal table join query
String dimJoinQuery =
"SELECT"
+ " a,"
+ " b,"
+ " h.family1.col1,"
+ " h.family2.col1,"
+ " h.family2.col2,"
+ " h.family3.col1,"
+ " h.family3.col2,"
+ " h.family3.col3,"
+ " h.family4.col1,"
+ " h.family4.col2,"
+ " h.family4.col3,"
+ " h.family4.col4 "
+ " FROM src JOIN "
+ TEST_TABLE_1
+ " FOR SYSTEM_TIME AS OF src.proc as h ON src.a = h.rowkey";
Iterator<Row> collected = tEnv.executeSql(dimJoinQuery).collect();
List<String> result =
StreamSupport.stream(
Spliterators.spliteratorUnknownSize(collected, Spliterator.ORDERED),
false)
.map(Row::toString)
.sorted()
.collect(Collectors.toList());
List<String> expected = new ArrayList<>();
expected.add(
"+I[1, 1, 10, Hello-1, 100, 1.01, false, Welt-1, 2019-08-18T19:00, 2019-08-18, 19:00, 12345678.0001]");
expected.add(
"+I[2, 2, 20, Hello-2, 200, 2.02, true, Welt-2, 2019-08-18T19:01, 2019-08-18, 19:01, 12345678.0002]");
expected.add(
"+I[3, 2, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
expected.add(
"+I[3, 3, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
assertEquals(expected, result);
}
// -------------------------------------------------------------------------------------
// HBase lookup source tests
// -------------------------------------------------------------------------------------
// prepare a source collection.
private static final List<Row> testData = new ArrayList<>();
private static final RowTypeInfo testTypeInfo =
new RowTypeInfo(
new TypeInformation[] {Types.INT, Types.LONG, Types.STRING},
new String[] {"a", "b", "c"});
static {
testData.add(Row.of(1, 1L, "Hi"));
testData.add(Row.of(2, 2L, "Hello"));
testData.add(Row.of(3, 2L, "Hello world"));
testData.add(Row.of(3, 3L, "Hello world!"));
}
// ------------------------------- Utilities -------------------------------------------------
/** A {@link ScalarFunction} that maps byte arrays to UTF-8 strings. */
public static class ToUTF8 extends ScalarFunction {
private static final long serialVersionUID = 1L;
public String eval(byte[] bytes) {
return Bytes.toString(bytes);
}
}
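    // A hypothetical usage sketch (not part of the original tests): such a function could be
    // registered on the TableEnvironment and applied to a BYTES column from SQL; the function
    // name "to_utf8", table "some_table" and column "b" are made up for this example:
    //   tEnv.createTemporarySystemFunction("to_utf8", new ToUTF8());
    //   tEnv.executeSql("SELECT to_utf8(b) FROM some_table");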
    /** A {@link ScalarFunction} that maps byte arrays to longs. */
public static class ToLong extends ScalarFunction {
private static final long serialVersionUID = 1L;
public long eval(byte[] bytes) {
return Bytes.toLong(bytes);
}
}
private String createHBaseTableDDL(String tableName, boolean testTimeAndDecimalTypes) {
StringBuilder family4Statement = new StringBuilder();
if (testTimeAndDecimalTypes) {
family4Statement.append(", family4 ROW<col1 TIMESTAMP(3)");
family4Statement.append(", col2 DATE");
family4Statement.append(", col3 TIME(3)");
family4Statement.append(", col4 DECIMAL(12, 4)");
family4Statement.append("> \n");
}
return "CREATE TABLE "
+ tableName
+ "(\n"
+ " rowkey INT,"
+ " family1 ROW<col1 INT>,\n"
+ " family2 ROW<col1 VARCHAR, col2 BIGINT>,\n"
+ " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 VARCHAR>"
+ family4Statement.toString()
+ ") WITH (\n"
+ " 'connector' = 'hbase-2.2',\n"
+ " 'table-name' = '"
+ tableName
+ "',\n"
+ " 'zookeeper.quorum' = '"
+ getZookeeperQuorum()
+ "',\n"
+ " 'zookeeper.znode.parent' = '/hbase' "
+ ")";
}
}
| 3,967 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestBase.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.util;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import static org.apache.flink.table.utils.DateTimeUtils.toInternal;
/** Abstract IT case class for HBase. */
public abstract class HBaseTestBase extends HBaseTestingClusterAutoStarter {
protected static final String TEST_TABLE_1 = "testTable1";
protected static final String TEST_TABLE_2 = "testTable2";
protected static final String TEST_TABLE_3 = "testTable3";
protected static final String TEST_TABLE_4 = "testTable4";
protected static final String TEST_TABLE_5 = "testTable5";
protected static final String TEST_EMPTY_TABLE = "testEmptyTable";
protected static final String TEST_NOT_EXISTS_TABLE = "notExistsTable";
protected static final String ROW_KEY = "rowkey";
protected static final String FAMILY1 = "family1";
protected static final String F1COL1 = "col1";
protected static final String FAMILY2 = "family2";
protected static final String F2COL1 = "col1";
protected static final String F2COL2 = "col2";
protected static final String FAMILY3 = "family3";
protected static final String F3COL1 = "col1";
protected static final String F3COL2 = "col2";
protected static final String F3COL3 = "col3";
protected static final String FAMILY4 = "family4";
protected static final String F4COL1 = "col1";
protected static final String F4COL2 = "col2";
protected static final String F4COL3 = "col3";
protected static final String F4COL4 = "col4";
private static final byte[][] FAMILIES =
new byte[][] {
Bytes.toBytes(FAMILY1),
Bytes.toBytes(FAMILY2),
Bytes.toBytes(FAMILY3),
Bytes.toBytes(FAMILY4)
};
private static final byte[][] SPLIT_KEYS = new byte[][] {Bytes.toBytes(4)};
protected EnvironmentSettings streamSettings;
protected EnvironmentSettings batchSettings;
@BeforeClass
public static void activateHBaseCluster() throws IOException {
prepareTables();
}
@Before
public void before() {
this.streamSettings = EnvironmentSettings.inStreamingMode();
this.batchSettings = EnvironmentSettings.inBatchMode();
}
private static void prepareTables() throws IOException {
createHBaseTable1();
createHBaseTable2();
createHBaseTable3();
createHBaseTable4();
createHBaseTable5();
createEmptyHBaseTable();
}
private static void createHBaseTable1() throws IOException {
// create a table
TableName tableName = TableName.valueOf(TEST_TABLE_1);
createTable(tableName, FAMILIES, SPLIT_KEYS);
// get the HTable instance
Table table = openTable(tableName);
List<Put> puts = new ArrayList<>();
// add some data
puts.add(
putRow(
1,
10,
"Hello-1",
100L,
1.01,
false,
"Welt-1",
Timestamp.valueOf("2019-08-18 19:00:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:00:00"),
new BigDecimal("12345678.0001")));
puts.add(
putRow(
2,
20,
"Hello-2",
200L,
2.02,
true,
"Welt-2",
Timestamp.valueOf("2019-08-18 19:01:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:01:00"),
new BigDecimal("12345678.0002")));
puts.add(
putRow(
3,
30,
"Hello-3",
300L,
3.03,
false,
"Welt-3",
Timestamp.valueOf("2019-08-18 19:02:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:02:00"),
new BigDecimal("12345678.0003")));
puts.add(
putRow(
4,
40,
null,
400L,
4.04,
true,
"Welt-4",
Timestamp.valueOf("2019-08-18 19:03:00"),
Date.valueOf("2019-08-18"),
Time.valueOf("19:03:00"),
new BigDecimal("12345678.0004")));
puts.add(
putRow(
5,
50,
"Hello-5",
500L,
5.05,
false,
"Welt-5",
Timestamp.valueOf("2019-08-19 19:10:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:10:00"),
new BigDecimal("12345678.0005")));
puts.add(
putRow(
6,
60,
"Hello-6",
600L,
6.06,
true,
"Welt-6",
Timestamp.valueOf("2019-08-19 19:20:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:20:00"),
new BigDecimal("12345678.0006")));
puts.add(
putRow(
7,
70,
"Hello-7",
700L,
7.07,
false,
"Welt-7",
Timestamp.valueOf("2019-08-19 19:30:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:30:00"),
new BigDecimal("12345678.0007")));
puts.add(
putRow(
8,
80,
null,
800L,
8.08,
true,
"Welt-8",
Timestamp.valueOf("2019-08-19 19:40:00"),
Date.valueOf("2019-08-19"),
Time.valueOf("19:40:00"),
new BigDecimal("12345678.0008")));
// append rows to table
table.put(puts);
table.close();
}
private static void createHBaseTable2() {
// create a table
TableName tableName = TableName.valueOf(TEST_TABLE_2);
createTable(tableName, FAMILIES, SPLIT_KEYS);
}
private static void createHBaseTable3() {
// create a table
byte[][] families =
new byte[][] {
Bytes.toBytes(FAMILY1),
Bytes.toBytes(FAMILY2),
Bytes.toBytes(FAMILY3),
Bytes.toBytes(FAMILY4),
};
TableName tableName = TableName.valueOf(TEST_TABLE_3);
createTable(tableName, families, SPLIT_KEYS);
}
private static void createHBaseTable4() {
// create a table
byte[][] families = new byte[][] {Bytes.toBytes(FAMILY1)};
TableName tableName = TableName.valueOf(TEST_TABLE_4);
createTable(tableName, families, SPLIT_KEYS);
}
private static void createHBaseTable5() {
// create a table
byte[][] families = new byte[][] {Bytes.toBytes(FAMILY1)};
TableName tableName = TableName.valueOf(TEST_TABLE_5);
createTable(tableName, families, SPLIT_KEYS);
}
private static void createEmptyHBaseTable() {
// create a table
byte[][] families = new byte[][] {Bytes.toBytes(FAMILY1)};
TableName tableName = TableName.valueOf(TEST_EMPTY_TABLE);
createTable(tableName, families, SPLIT_KEYS);
}
private static Put putRow(
int rowKey,
int f1c1,
String f2c1,
long f2c2,
double f3c1,
boolean f3c2,
String f3c3,
Timestamp f4c1,
Date f4c2,
Time f4c3,
BigDecimal f4c4) {
Put put = new Put(Bytes.toBytes(rowKey));
// family 1
put.addColumn(Bytes.toBytes(FAMILY1), Bytes.toBytes(F1COL1), Bytes.toBytes(f1c1));
// family 2
if (f2c1 != null) {
put.addColumn(Bytes.toBytes(FAMILY2), Bytes.toBytes(F2COL1), Bytes.toBytes(f2c1));
}
put.addColumn(Bytes.toBytes(FAMILY2), Bytes.toBytes(F2COL2), Bytes.toBytes(f2c2));
// family 3
put.addColumn(Bytes.toBytes(FAMILY3), Bytes.toBytes(F3COL1), Bytes.toBytes(f3c1));
put.addColumn(Bytes.toBytes(FAMILY3), Bytes.toBytes(F3COL2), Bytes.toBytes(f3c2));
put.addColumn(Bytes.toBytes(FAMILY3), Bytes.toBytes(F3COL3), Bytes.toBytes(f3c3));
// family 4
put.addColumn(
Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL1), Bytes.toBytes(toInternal(f4c1)));
put.addColumn(
Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL2), Bytes.toBytes(toInternal(f4c2)));
put.addColumn(
Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL3), Bytes.toBytes(toInternal(f4c3)));
put.addColumn(Bytes.toBytes(FAMILY4), Bytes.toBytes(F4COL4), Bytes.toBytes(f4c4));
return put;
}
}
| 3,968 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestingClusterAutoStarter.java
|
/*
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.util;
import org.apache.commons.lang3.Range;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.util.VersionUtil;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * By using this class as the super class of a set of tests, you will have an HBase testing
 * cluster available that is well suited for writing scanning and filtering tests against.
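 *
 * <p>A minimal usage sketch (illustrative only; the subclass, table and family names are made up
 * for this example):
 *
 * <pre>{@code
 * public class MyScanITCase extends HBaseTestingClusterAutoStarter {
 *     public void testScan() throws Exception { // annotate with JUnit's Test annotation
 *         TableName name = TableName.valueOf("myTable");
 *         createTable(name, new byte[][] {Bytes.toBytes("f1")}, new byte[][] {Bytes.toBytes(1)});
 *         Table table = openTable(name);
 *         // put / scan against the mini cluster, e.g. using getConf() or getZookeeperQuorum()
 *         table.close();
 *     }
 * }
 * }</pre>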
*/
public class HBaseTestingClusterAutoStarter {
private static final Log LOG = LogFactory.getLog(HBaseTestingClusterAutoStarter.class);
private static final Range<String> HADOOP_VERSION_RANGE =
Range.between("2.8.0", "3.0.3", VersionUtil::compareVersions);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Admin admin = null;
private static List<TableName> createdTables = new ArrayList<>();
private static Configuration conf;
protected static void createTable(
TableName tableName, byte[][] columnFamilyName, byte[][] splitKeys) {
assertNotNull("HBaseAdmin is not initialized successfully.", admin);
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] fam : columnFamilyName) {
HColumnDescriptor colDef = new HColumnDescriptor(fam);
desc.addFamily(colDef);
}
try {
admin.createTable(desc, splitKeys);
createdTables.add(tableName);
assertTrue("Fail to create the table", admin.tableExists(tableName));
} catch (IOException e) {
assertNull("Exception found while creating table", e);
}
}
protected static Table openTable(TableName tableName) throws IOException {
Table table = TEST_UTIL.getConnection().getTable(tableName);
assertTrue("Fail to create the table", admin.tableExists(tableName));
return table;
}
private static void deleteTables() {
if (admin != null) {
for (TableName tableName : createdTables) {
try {
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
} catch (IOException e) {
assertNull("Exception found deleting the table", e);
}
}
}
}
public static Configuration getConf() {
return conf;
}
public static String getZookeeperQuorum() {
return "localhost:" + TEST_UTIL.getZkCluster().getClientPort();
}
private static void initialize(Configuration c) {
conf = HBaseConfiguration.create(c);
        // the default client retry number in hbase-2.2 is 15; set it explicitly for the tests
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 15);
try {
admin = TEST_UTIL.getAdmin();
} catch (MasterNotRunningException e) {
assertNull("Master is not running", e);
} catch (ZooKeeperConnectionException e) {
assertNull("Cannot connect to ZooKeeper", e);
} catch (IOException e) {
assertNull("IOException", e);
}
}
@BeforeClass
public static void setUp() throws Exception {
// HBase 2.2.3 HBaseTestingUtility works with only a certain range of hadoop versions
String hadoopVersion = System.getProperty("hadoop.version", "2.8.5");
Assume.assumeTrue(HADOOP_VERSION_RANGE.contains(hadoopVersion));
TEST_UTIL.startMiniCluster(1);
// https://issues.apache.org/jira/browse/HBASE-11711
TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);
// Make sure the zookeeper quorum value contains the right port number (varies per run).
LOG.info("Hbase minicluster client port: " + TEST_UTIL.getZkCluster().getClientPort());
TEST_UTIL
.getConfiguration()
.set(
"hbase.zookeeper.quorum",
"localhost:" + TEST_UTIL.getZkCluster().getClientPort());
initialize(TEST_UTIL.getConfiguration());
}
@AfterClass
public static void tearDown() throws Exception {
if (conf == null) {
LOG.info("Skipping Hbase tear down. It was never started");
return;
}
LOG.info("HBase minicluster: Shutting down");
deleteTables();
TEST_UTIL.shutdownMiniCluster();
LOG.info("HBase minicluster: Down");
}
}
| 3,969 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/source/HBaseRowDataAsyncLookupFunctionTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.source;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase2.util.HBaseTestBase;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.source.lookup.LookupOptions;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import static org.apache.flink.table.api.DataTypes.BIGINT;
import static org.apache.flink.table.api.DataTypes.DOUBLE;
import static org.apache.flink.table.api.DataTypes.FIELD;
import static org.apache.flink.table.api.DataTypes.INT;
import static org.apache.flink.table.api.DataTypes.ROW;
import static org.apache.flink.table.api.DataTypes.STRING;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/** Test suite for {@link HBaseRowDataAsyncLookupFunction}. */
public class HBaseRowDataAsyncLookupFunctionTest extends HBaseTestBase {
@Test
public void testEval() throws Exception {
HBaseRowDataAsyncLookupFunction lookupFunction = buildRowDataAsyncLookupFunction();
lookupFunction.open(null);
final List<String> result = new ArrayList<>();
int[] rowkeys = {1, 2, 1, 12, 3, 12, 4, 3};
CountDownLatch latch = new CountDownLatch(rowkeys.length);
for (int rowkey : rowkeys) {
CompletableFuture<Collection<RowData>> future = new CompletableFuture<>();
lookupFunction.eval(future, rowkey);
future.whenComplete(
(rs, t) -> {
synchronized (result) {
if (rs.isEmpty()) {
result.add(rowkey + ": null");
} else {
rs.forEach(row -> result.add(rowkey + ": " + row.toString()));
}
}
latch.countDown();
});
}
// this verifies lookup calls are async
assertTrue(result.size() < rowkeys.length);
latch.await();
lookupFunction.close();
List<String> sortResult = new ArrayList<>(result);
Collections.sort(sortResult);
List<String> expected = new ArrayList<>();
expected.add("12: null");
expected.add("12: null");
expected.add("1: +I(1,+I(10),+I(Hello-1,100),+I(1.01,false,Welt-1))");
expected.add("1: +I(1,+I(10),+I(Hello-1,100),+I(1.01,false,Welt-1))");
expected.add("2: +I(2,+I(20),+I(Hello-2,200),+I(2.02,true,Welt-2))");
expected.add("3: +I(3,+I(30),+I(Hello-3,300),+I(3.03,false,Welt-3))");
expected.add("3: +I(3,+I(30),+I(Hello-3,300),+I(3.03,false,Welt-3))");
expected.add("4: +I(4,+I(40),+I(null,400),+I(4.04,true,Welt-4))");
assertEquals(expected, sortResult);
}
private HBaseRowDataAsyncLookupFunction buildRowDataAsyncLookupFunction() {
DataType dataType =
ROW(
FIELD(ROW_KEY, INT()),
FIELD(FAMILY1, ROW(FIELD(F1COL1, INT()))),
FIELD(FAMILY2, ROW(FIELD(F2COL1, STRING()), FIELD(F2COL2, BIGINT()))),
FIELD(
FAMILY3,
ROW(
FIELD(F3COL1, DOUBLE()),
FIELD(F3COL2, DataTypes.BOOLEAN()),
FIELD(F3COL3, STRING()))));
HBaseTableSchema hbaseSchema = HBaseTableSchema.fromDataType(dataType);
return new HBaseRowDataAsyncLookupFunction(
getConf(),
TEST_TABLE_1,
hbaseSchema,
"null",
LookupOptions.MAX_RETRIES.defaultValue());
}
}
| 3,970 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2DynamicTableFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink;
import org.apache.flink.connector.hbase2.source.HBaseDynamicTableSource;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.lookup.LookupOptions;
import org.apache.flink.table.connector.source.lookup.cache.DefaultLookupCache;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper;
import org.apache.hadoop.conf.Configuration;
import java.time.Duration;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_ASYNC;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_CACHE_MAX_ROWS;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_CACHE_TTL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.LOOKUP_MAX_RETRIES;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.NULL_STRING_LITERAL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_INTERVAL;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_MAX_ROWS;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_BUFFER_FLUSH_MAX_SIZE;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_IGNORE_NULL_VALUE;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.SINK_PARALLELISM;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.TABLE_NAME;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.ZOOKEEPER_QUORUM;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptions.ZOOKEEPER_ZNODE_PARENT;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.PROPERTIES_PREFIX;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.getHBaseConfiguration;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.getHBaseWriteOptions;
import static org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.validatePrimaryKey;
import static org.apache.flink.table.factories.FactoryUtil.createTableFactoryHelper;
/** HBase connector factory. */
@Internal
public class HBase2DynamicTableFactory
implements DynamicTableSourceFactory, DynamicTableSinkFactory {
private static final String IDENTIFIER = "hbase-2.2";
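    // Illustrative DDL only (mirrors the pattern used in the connector's ITCases; the table name,
    // column layout and quorum address below are placeholders):
    //
    //   CREATE TABLE hTable (
    //     rowkey INT,
    //     family1 ROW<col1 INT>,
    //     PRIMARY KEY (rowkey) NOT ENFORCED
    //   ) WITH (
    //     'connector' = 'hbase-2.2',
    //     'table-name' = 'testTable1',
    //     'zookeeper.quorum' = 'localhost:2181'
    //   )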
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
TableFactoryHelper helper = createTableFactoryHelper(this, context);
helper.validateExcept(PROPERTIES_PREFIX);
final ReadableConfig tableOptions = helper.getOptions();
validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
String tableName = tableOptions.get(TABLE_NAME);
Configuration hbaseConf = getHBaseConfiguration(tableOptions);
String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
HBaseTableSchema hbaseSchema =
HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
LookupCache cache = null;
        // Backward compatibility with the legacy caching options
if (tableOptions.get(LOOKUP_CACHE_MAX_ROWS) > 0
&& tableOptions.get(LOOKUP_CACHE_TTL).compareTo(Duration.ZERO) > 0) {
cache =
DefaultLookupCache.newBuilder()
.maximumSize(tableOptions.get(LOOKUP_CACHE_MAX_ROWS))
.expireAfterWrite(tableOptions.get(LOOKUP_CACHE_TTL))
.build();
}
if (tableOptions
.get(LookupOptions.CACHE_TYPE)
.equals(LookupOptions.LookupCacheType.PARTIAL)) {
cache = DefaultLookupCache.fromConfig(tableOptions);
}
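        // For reference (option keys shown for illustration): the legacy style
        //   'lookup.cache.max-rows' = '1000', 'lookup.cache.ttl' = '10 min'
        // and the newer style
        //   'lookup.cache' = 'PARTIAL', 'lookup.partial-cache.max-rows' = '1000',
        //   'lookup.partial-cache.expire-after-write' = '10 min'
        // both end up as a partial LookupCache here; the newer options win when both are set.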
return new HBaseDynamicTableSource(
hbaseConf,
tableName,
hbaseSchema,
nullStringLiteral,
tableOptions.get(LookupOptions.MAX_RETRIES),
tableOptions.get(LOOKUP_ASYNC),
cache);
}
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
TableFactoryHelper helper = createTableFactoryHelper(this, context);
helper.validateExcept(PROPERTIES_PREFIX);
final ReadableConfig tableOptions = helper.getOptions();
validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
String tableName = tableOptions.get(TABLE_NAME);
Configuration hbaseConf = getHBaseConfiguration(tableOptions);
HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
return new HBaseDynamicTableSink(
tableName,
context.getPhysicalRowDataType(),
hbaseConf,
hBaseWriteOptions,
nullStringLiteral);
}
@Override
public String factoryIdentifier() {
return IDENTIFIER;
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
Set<ConfigOption<?>> set = new HashSet<>();
set.add(TABLE_NAME);
return set;
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> set = new HashSet<>();
set.add(ZOOKEEPER_ZNODE_PARENT);
set.add(ZOOKEEPER_QUORUM);
set.add(NULL_STRING_LITERAL);
set.add(SINK_BUFFER_FLUSH_MAX_SIZE);
set.add(SINK_BUFFER_FLUSH_MAX_ROWS);
set.add(SINK_BUFFER_FLUSH_INTERVAL);
set.add(SINK_PARALLELISM);
set.add(SINK_IGNORE_NULL_VALUE);
set.add(LOOKUP_ASYNC);
set.add(LOOKUP_CACHE_MAX_ROWS);
set.add(LOOKUP_CACHE_TTL);
set.add(LOOKUP_MAX_RETRIES);
set.add(LookupOptions.CACHE_TYPE);
set.add(LookupOptions.MAX_RETRIES);
set.add(LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_ACCESS);
set.add(LookupOptions.PARTIAL_CACHE_EXPIRE_AFTER_WRITE);
set.add(LookupOptions.PARTIAL_CACHE_CACHE_MISSING_KEY);
set.add(LookupOptions.PARTIAL_CACHE_MAX_ROWS);
return set;
}
@Override
public Set<ConfigOption<?>> forwardOptions() {
return Stream.of(
TABLE_NAME,
ZOOKEEPER_ZNODE_PARENT,
ZOOKEEPER_QUORUM,
NULL_STRING_LITERAL,
LOOKUP_CACHE_MAX_ROWS,
LOOKUP_CACHE_TTL,
LOOKUP_MAX_RETRIES,
SINK_BUFFER_FLUSH_MAX_SIZE,
SINK_BUFFER_FLUSH_MAX_ROWS,
SINK_BUFFER_FLUSH_INTERVAL,
SINK_IGNORE_NULL_VALUE)
.collect(Collectors.toSet());
}
}
| 3,971 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseDynamicTableSource.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.connector.hbase.source.AbstractHBaseDynamicTableSource;
import org.apache.flink.connector.hbase.source.HBaseRowDataLookupFunction;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.connector.source.lookup.AsyncLookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.LookupFunctionProvider;
import org.apache.flink.table.connector.source.lookup.PartialCachingAsyncLookupProvider;
import org.apache.flink.table.connector.source.lookup.PartialCachingLookupProvider;
import org.apache.flink.table.connector.source.lookup.cache.LookupCache;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.hadoop.conf.Configuration;
import javax.annotation.Nullable;
import java.util.Objects;
import static org.apache.flink.util.Preconditions.checkArgument;
/** HBase table source implementation. */
@Internal
public class HBaseDynamicTableSource extends AbstractHBaseDynamicTableSource {
private final boolean lookupAsync;
public HBaseDynamicTableSource(
Configuration conf,
String tableName,
HBaseTableSchema hbaseSchema,
String nullStringLiteral,
int maxRetryTimes,
boolean lookupAsync,
@Nullable LookupCache cache) {
super(conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes, cache);
this.lookupAsync = lookupAsync;
}
@Override
public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
checkArgument(
context.getKeys().length == 1 && context.getKeys()[0].length == 1,
"Currently, HBase table can only be lookup by single rowkey.");
checkArgument(
hbaseSchema.getRowKeyName().isPresent(),
"HBase schema must have a row key when used in lookup mode.");
checkArgument(
DataType.getFieldNames(hbaseSchema.convertToDataType())
.get(context.getKeys()[0][0])
.equals(hbaseSchema.getRowKeyName().get()),
"Currently, HBase table only supports lookup by rowkey field.");
if (lookupAsync) {
HBaseRowDataAsyncLookupFunction asyncLookupFunction =
new HBaseRowDataAsyncLookupFunction(
conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes);
if (cache != null) {
return PartialCachingAsyncLookupProvider.of(asyncLookupFunction, cache);
} else {
return AsyncLookupFunctionProvider.of(asyncLookupFunction);
}
} else {
HBaseRowDataLookupFunction lookupFunction =
new HBaseRowDataLookupFunction(
conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes);
if (cache != null) {
return PartialCachingLookupProvider.of(lookupFunction, cache);
} else {
return LookupFunctionProvider.of(lookupFunction);
}
}
}
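    // Illustrative only (follows the pattern used in the connector's ITCases; "hTable" and the
    // source table "src" are placeholders): a lookup join that satisfies the single-rowkey
    // constraints checked above looks like
    //
    //   SELECT s.a, h.family1.col1
    //   FROM src AS s
    //   JOIN hTable FOR SYSTEM_TIME AS OF s.proc AS h
    //     ON s.a = h.rowkey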
@Override
public DynamicTableSource copy() {
return new HBaseDynamicTableSource(
conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes, lookupAsync, cache);
}
@Override
protected InputFormat<RowData, ?> getInputFormat() {
return new HBaseRowDataInputFormat(conf, tableName, hbaseSchema, nullStringLiteral);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof HBaseDynamicTableSource)) {
return false;
}
HBaseDynamicTableSource that = (HBaseDynamicTableSource) o;
return Objects.equals(conf, that.conf)
&& Objects.equals(tableName, that.tableName)
&& Objects.equals(hbaseSchema, that.hbaseSchema)
&& Objects.equals(nullStringLiteral, that.nullStringLiteral)
&& Objects.equals(maxRetryTimes, that.maxRetryTimes)
&& Objects.equals(cache, that.cache)
&& Objects.equals(lookupAsync, that.lookupAsync);
}
@Override
public int hashCode() {
return Objects.hash(
conf, tableName, hbaseSchema, nullStringLiteral, maxRetryTimes, cache, lookupAsync);
}
}
| 3,972 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataAsyncLookupFunction.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.connector.hbase.util.HBaseSerde;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.functions.AsyncLookupFunction;
import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.util.StringUtils;
import org.apache.flink.util.concurrent.ExecutorThreadFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ScanResultConsumer;
import org.apache.hadoop.hbase.util.Threads;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * The HBaseRowDataAsyncLookupFunction is an implementation that looks up HBase data by rowkey in
 * an asynchronous fashion. It returns the result as {@link RowData}.
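 *
 * <p>A rough usage sketch (mirrors the connector's test code; the configuration, schema and table
 * name below are placeholders):
 *
 * <pre>{@code
 * HBaseRowDataAsyncLookupFunction lookup =
 *         new HBaseRowDataAsyncLookupFunction(conf, "hTable", schema, "null", 3);
 * lookup.open(null);
 * CompletableFuture<Collection<RowData>> future = new CompletableFuture<>();
 * lookup.eval(future, 1); // look up rowkey 1
 * future.thenAccept(rows -> rows.forEach(System.out::println));
 * lookup.close();
 * }</pre>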
*/
@Internal
public class HBaseRowDataAsyncLookupFunction extends AsyncLookupFunction {
private static final Logger LOG =
LoggerFactory.getLogger(HBaseRowDataAsyncLookupFunction.class);
private static final long serialVersionUID = 1L;
private final String hTableName;
private final byte[] serializedConfig;
private final HBaseTableSchema hbaseTableSchema;
private final String nullStringLiteral;
private transient AsyncConnection asyncConnection;
private transient AsyncTable<ScanResultConsumer> table;
private transient HBaseSerde serde;
private final int maxRetryTimes;
    /** The size of the thread pool. */
private static final int THREAD_POOL_SIZE = 16;
public HBaseRowDataAsyncLookupFunction(
Configuration configuration,
String hTableName,
HBaseTableSchema hbaseTableSchema,
String nullStringLiteral,
int maxRetryTimes) {
this.serializedConfig = HBaseConfigurationUtil.serializeConfiguration(configuration);
this.hTableName = hTableName;
this.hbaseTableSchema = hbaseTableSchema;
this.nullStringLiteral = nullStringLiteral;
this.maxRetryTimes = maxRetryTimes;
}
@Override
public void open(FunctionContext context) {
LOG.info("start open ...");
final ExecutorService threadPool =
Executors.newFixedThreadPool(
THREAD_POOL_SIZE,
new ExecutorThreadFactory(
"hbase-async-lookup-worker", Threads.LOGGING_EXCEPTION_HANDLER));
Configuration config = prepareRuntimeConfiguration();
CompletableFuture<AsyncConnection> asyncConnectionFuture =
ConnectionFactory.createAsyncConnection(config);
try {
asyncConnection = asyncConnectionFuture.get();
table = asyncConnection.getTable(TableName.valueOf(hTableName), threadPool);
} catch (InterruptedException | ExecutionException e) {
LOG.error("Exception while creating connection to HBase.", e);
throw new RuntimeException("Cannot create connection to HBase.", e);
}
this.serde = new HBaseSerde(hbaseTableSchema, nullStringLiteral);
LOG.info("end open.");
}
/**
     * The invoke entry point of the lookup function.
     *
     * @param keyRow A {@link RowData} that wraps the lookup key. Currently only a single rowkey
     *     is supported.
*/
@Override
public CompletableFuture<Collection<RowData>> asyncLookup(RowData keyRow) {
int currentRetry = 0;
CompletableFuture<Collection<RowData>> future = new CompletableFuture<>();
// fetch result
fetchResult(future, currentRetry, ((GenericRowData) keyRow).getField(0));
return future;
}
/**
     * Executes the asynchronous fetch of a lookup result.
     *
     * @param resultFuture The future that is completed with the result or the exception.
     * @param currentRetry Current number of retries.
     * @param rowKey The lookup key.
*/
private void fetchResult(
CompletableFuture<Collection<RowData>> resultFuture, int currentRetry, Object rowKey) {
Get get = serde.createGet(rowKey);
CompletableFuture<Result> responseFuture = table.get(get);
responseFuture.whenCompleteAsync(
(result, throwable) -> {
if (throwable != null) {
if (throwable instanceof TableNotFoundException) {
LOG.error("Table '{}' not found ", hTableName, throwable);
resultFuture.completeExceptionally(
new RuntimeException(
"HBase table '" + hTableName + "' not found.",
throwable));
} else {
LOG.error(
String.format(
"HBase asyncLookup error, retry times = %d",
currentRetry),
throwable);
if (currentRetry >= maxRetryTimes) {
resultFuture.completeExceptionally(throwable);
} else {
try {
Thread.sleep(1000 * currentRetry);
} catch (InterruptedException e1) {
resultFuture.completeExceptionally(e1);
}
fetchResult(resultFuture, currentRetry + 1, rowKey);
}
}
} else {
if (result.isEmpty()) {
resultFuture.complete(Collections.emptyList());
} else {
resultFuture.complete(
Collections.singletonList(serde.convertToNewRow(result)));
}
}
});
}
private Configuration prepareRuntimeConfiguration() {
        // Create the default configuration from the current runtime env (`hbase-site.xml` in the
        // classpath) first, then overwrite it with the serialized configuration coming from the
        // client-side env (its `hbase-site.xml` in the classpath). User params from the client
        // side have the highest priority.
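        // For example (illustrative, assuming an option passed through with the 'properties.'
        // prefix in the table DDL): a value such as 'properties.hbase.client.retries.number' set
        // on the client side ends up in this runtime configuration and takes precedence over the
        // value found in the classpath's hbase-site.xml.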
Configuration runtimeConfig =
HBaseConfigurationUtil.deserializeConfiguration(
serializedConfig, HBaseConfigurationUtil.getHBaseConfiguration());
// do validation: check key option(s) in final runtime configuration
if (StringUtils.isNullOrWhitespaceOnly(runtimeConfig.get(HConstants.ZOOKEEPER_QUORUM))) {
LOG.error(
"can not connect to HBase without {} configuration",
HConstants.ZOOKEEPER_QUORUM);
throw new IllegalArgumentException(
"check HBase configuration failed, lost: '"
+ HConstants.ZOOKEEPER_QUORUM
+ "'!");
}
return runtimeConfig;
}
@Override
public void close() {
LOG.info("start close ...");
if (null != table) {
table = null;
}
if (null != asyncConnection) {
try {
asyncConnection.close();
asyncConnection = null;
} catch (IOException e) {
// ignore exception when close.
LOG.warn("exception when close connection", e);
}
}
LOG.info("end close.");
}
@VisibleForTesting
public String getHTableName() {
return hTableName;
}
}
| 3,973 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataInputFormat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.source;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.connector.hbase.util.HBaseSerde;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.data.RowData;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
 * {@link InputFormat} subclass that wraps the access to HTables. Returns the result as {@link
 * RowData}.
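 *
 * <p>A minimal read sketch (as exercised in the connector tests; the configuration, table name and
 * schema below are placeholders):
 *
 * <pre>{@code
 * HBaseRowDataInputFormat format = new HBaseRowDataInputFormat(conf, "hTable", schema, "null");
 * TableInputSplit[] splits = format.createInputSplits(1);
 * format.open(splits[0]);
 * while (!format.reachedEnd()) {
 *     RowData row = format.nextRecord(null);
 *     if (row == null) {
 *         break;
 *     }
 *     // consume row
 * }
 * format.close();
 * }</pre>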
*/
public class HBaseRowDataInputFormat extends AbstractTableInputFormat<RowData> {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(HBaseRowDataInputFormat.class);
private final String tableName;
private final HBaseTableSchema schema;
private final String nullStringLiteral;
private transient HBaseSerde serde;
public HBaseRowDataInputFormat(
org.apache.hadoop.conf.Configuration conf,
String tableName,
HBaseTableSchema schema,
String nullStringLiteral) {
super(conf);
this.tableName = tableName;
this.schema = schema;
this.nullStringLiteral = nullStringLiteral;
}
@Override
protected void initTable() throws IOException {
this.serde = new HBaseSerde(schema, nullStringLiteral);
if (table == null) {
connectToTable();
}
if (table != null && scan == null) {
scan = getScanner();
}
}
@Override
protected Scan getScanner() {
return serde.createScan();
}
@Override
public String getTableName() {
return tableName;
}
@Override
protected RowData mapResultToOutType(Result res) {
return serde.convertToReusedRow(res);
}
private void connectToTable() throws IOException {
if (connection == null) {
connection = ConnectionFactory.createConnection(getHadoopConfiguration());
}
TableName name = TableName.valueOf(tableName);
if (!connection.getAdmin().tableExists(name)) {
throw new TableNotFoundException("HBase table '" + tableName + "' not found.");
}
table = connection.getTable(name);
regionLocator = connection.getRegionLocator(name);
}
}
| 3,974 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/AbstractTableInputFormat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.source;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.api.common.io.LocatableInputSplitAssigner;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.common.io.statistics.BaseStatistics;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.hbase.source.TableInputSplit;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.core.io.InputSplitAssigner;
import org.apache.flink.util.IOUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/** Abstract {@link InputFormat} to read data from HBase tables. */
@Internal
public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, TableInputSplit> {
protected static final Logger LOG = LoggerFactory.getLogger(AbstractTableInputFormat.class);
private static final long serialVersionUID = 1L;
// helper variable to decide whether the input is exhausted or not
protected boolean endReached = false;
protected transient Connection connection = null;
protected transient Table table = null;
protected transient RegionLocator regionLocator = null;
protected transient Scan scan = null;
/** HBase iterator wrapper. */
protected ResultScanner resultScanner = null;
protected byte[] currentRow;
protected long scannedRows;
// Configuration is not serializable
protected byte[] serializedConfig;
public AbstractTableInputFormat(org.apache.hadoop.conf.Configuration hConf) {
serializedConfig = HBaseConfigurationUtil.serializeConfiguration(hConf);
}
/**
* Creates a {@link Scan} object and opens the {@link HTable} connection to initialize the HBase
* table.
*
* @throws IOException Thrown, if the connection could not be opened due to an I/O problem.
*/
protected abstract void initTable() throws IOException;
/**
* Returns an instance of Scan that retrieves the required subset of records from the HBase
* table.
*
* @return The appropriate instance of Scan for this use case.
*/
protected abstract Scan getScanner();
/**
     * Returns the name of the table to be read.
     *
     * <p>Each instance of a TableInputFormat derivative reads from exactly one table.
     *
     * @return The name of the table.
*/
protected abstract String getTableName();
/**
* HBase returns an instance of {@link Result}.
*
* <p>This method maps the returned {@link Result} instance into the output type {@link T}.
*
* @param r The Result instance from HBase that needs to be converted
* @return The appropriate instance of {@link T} that contains the data of Result.
*/
protected abstract T mapResultToOutType(Result r);
@Override
public void configure(Configuration parameters) {}
protected org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
return HBaseConfigurationUtil.deserializeConfiguration(
serializedConfig, HBaseConfigurationUtil.getHBaseConfiguration());
}
/**
* Creates a {@link Scan} object and opens the {@link HTable} connection. The connection is
* opened in this method and closed in {@link #close()}.
*
* @param split The split to be opened.
     * @throws IOException Thrown, if the split could not be opened due to an I/O problem.
*/
@Override
public void open(TableInputSplit split) throws IOException {
initTable();
if (split == null) {
throw new IOException("Input split is null!");
}
logSplitInfo("opening", split);
// set scan range
currentRow = split.getStartRow();
scan.setStartRow(currentRow);
scan.setStopRow(split.getEndRow());
resultScanner = table.getScanner(scan);
endReached = false;
scannedRows = 0;
}
@Override
public T nextRecord(T reuse) throws IOException {
if (resultScanner == null) {
throw new IOException("No table result scanner provided!");
}
Result res;
try {
res = resultScanner.next();
} catch (Exception e) {
resultScanner.close();
// workaround for timeout on scan
LOG.warn(
"Error after scan of " + scannedRows + " rows. Retry with a new scanner...", e);
scan.withStartRow(currentRow, false);
resultScanner = table.getScanner(scan);
res = resultScanner.next();
}
if (res != null) {
scannedRows++;
currentRow = res.getRow();
return mapResultToOutType(res);
}
endReached = true;
return null;
}
private void logSplitInfo(String action, TableInputSplit split) {
int splitId = split.getSplitNumber();
String splitStart = Bytes.toString(split.getStartRow());
String splitEnd = Bytes.toString(split.getEndRow());
String splitStartKey = splitStart.isEmpty() ? "-" : splitStart;
String splitStopKey = splitEnd.isEmpty() ? "-" : splitEnd;
String[] hostnames = split.getHostnames();
LOG.info(
"{} split (this={})[{}|{}|{}|{}]",
action,
this,
splitId,
hostnames,
splitStartKey,
splitStopKey);
}
@Override
public boolean reachedEnd() throws IOException {
return endReached;
}
@Override
public void close() throws IOException {
LOG.info("Closing split (scanned {} rows)", scannedRows);
currentRow = null;
IOUtils.closeQuietly(resultScanner);
resultScanner = null;
closeTable();
}
public void closeTable() {
if (table != null) {
try {
table.close();
} catch (IOException e) {
LOG.warn("Exception occurs while closing HBase Table.", e);
}
table = null;
}
if (connection != null) {
try {
connection.close();
} catch (IOException e) {
LOG.warn("Exception occurs while closing HBase Connection.", e);
}
connection = null;
}
}
@Override
public TableInputSplit[] createInputSplits(final int minNumSplits) throws IOException {
try {
initTable();
// Get the starting and ending row keys for every region in the currently open table
final Pair<byte[][], byte[][]> keys = regionLocator.getStartEndKeys();
if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) {
return new TableInputSplit[] {};
}
final byte[] startRow = scan.getStartRow();
final byte[] stopRow = scan.getStopRow();
final boolean scanWithNoLowerBound = startRow.length == 0;
final boolean scanWithNoUpperBound = stopRow.length == 0;
final List<TableInputSplit> splits = new ArrayList<>(minNumSplits);
for (int i = 0; i < keys.getFirst().length; i++) {
final byte[] startKey = keys.getFirst()[i];
final byte[] endKey = keys.getSecond()[i];
final String regionLocation =
regionLocator.getRegionLocation(startKey, false).getHostnamePort();
// Test if the given region is to be included in the InputSplit while splitting the
// regions of a table
if (!includeRegionInScan(startKey, endKey)) {
continue;
}
// Find the region on which the given row is being served
final String[] hosts = new String[] {regionLocation};
                // Determine if the region contains keys used by the scan
boolean isLastRegion = endKey.length == 0;
if ((scanWithNoLowerBound || isLastRegion || Bytes.compareTo(startRow, endKey) < 0)
&& (scanWithNoUpperBound || Bytes.compareTo(stopRow, startKey) > 0)) {
final byte[] splitStart =
scanWithNoLowerBound || Bytes.compareTo(startKey, startRow) >= 0
? startKey
: startRow;
final byte[] splitStop =
(scanWithNoUpperBound || Bytes.compareTo(endKey, stopRow) <= 0)
&& !isLastRegion
? endKey
: stopRow;
int id = splits.size();
final TableInputSplit split =
new TableInputSplit(
id, hosts, table.getName().getName(), splitStart, splitStop);
splits.add(split);
}
}
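            // Worked example (illustrative): for a region [b, f) and a scan range [a, d), the
            // split start is b (the region start is not below the scan start) and the split stop
            // is d (the scan stop falls inside the region), so the created split covers [b, d).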
LOG.info("Created " + splits.size() + " splits");
for (TableInputSplit split : splits) {
logSplitInfo("created", split);
}
return splits.toArray(new TableInputSplit[splits.size()]);
} finally {
closeTable();
}
}
/**
* Test if the given region is to be included in the scan while splitting the regions of a
* table.
*
* @param startKey Start key of the region
* @param endKey End key of the region
* @return true, if this region needs to be included as part of the input (default).
*/
protected boolean includeRegionInScan(final byte[] startKey, final byte[] endKey) {
return true;
}
@Override
public InputSplitAssigner getInputSplitAssigner(TableInputSplit[] inputSplits) {
return new LocatableInputSplitAssigner(inputSplits);
}
@Override
public BaseStatistics getStatistics(BaseStatistics cachedStatistics) {
return null;
}
@VisibleForTesting
public Connection getConnection() {
return connection;
}
}
| 3,975 |
0 |
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2
|
Create_ds/flink-connector-hbase/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseDynamicTableSink.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.hbase2.sink;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
import org.apache.flink.connector.hbase.sink.RowDataToMutationConverter;
import org.apache.flink.connector.hbase.sink.WritableMetadata;
import org.apache.flink.connector.hbase.util.HBaseTableSchema;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.RowKind;
import org.apache.hadoop.conf.Configuration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/** HBase table sink implementation. */
@Internal
public class HBaseDynamicTableSink implements DynamicTableSink, SupportsWritingMetadata {
private final String tableName;
private final HBaseTableSchema hbaseTableSchema;
private final Configuration hbaseConf;
private final HBaseWriteOptions writeOptions;
private final String nullStringLiteral;
private final DataType physicalDataType;
/** Metadata that is appended at the end of a physical sink row. */
private List<String> metadataKeys;
public HBaseDynamicTableSink(
String tableName,
DataType physicalDataType,
Configuration hbaseConf,
HBaseWriteOptions writeOptions,
String nullStringLiteral) {
this.tableName = tableName;
this.physicalDataType = physicalDataType;
this.hbaseTableSchema = HBaseTableSchema.fromDataType(physicalDataType);
this.metadataKeys = Collections.emptyList();
this.hbaseConf = hbaseConf;
this.writeOptions = writeOptions;
this.nullStringLiteral = nullStringLiteral;
}
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
HBaseSinkFunction<RowData> sinkFunction =
new HBaseSinkFunction<>(
tableName,
hbaseConf,
new RowDataToMutationConverter(
hbaseTableSchema,
physicalDataType,
metadataKeys,
nullStringLiteral,
writeOptions.isIgnoreNullValue()),
writeOptions.getBufferFlushMaxSizeInBytes(),
writeOptions.getBufferFlushMaxRows(),
writeOptions.getBufferFlushIntervalMillis());
return SinkFunctionProvider.of(sinkFunction, writeOptions.getParallelism());
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
// UPSERT mode
ChangelogMode.Builder builder = ChangelogMode.newBuilder();
for (RowKind kind : requestedMode.getContainedKinds()) {
if (kind != RowKind.UPDATE_BEFORE) {
builder.addContainedKind(kind);
}
}
return builder.build();
}
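    // Illustrative example (assumption): if the planner requests
    // [INSERT, UPDATE_BEFORE, UPDATE_AFTER, DELETE], the mode returned above contains
    // [INSERT, UPDATE_AFTER, DELETE]. UPDATE_BEFORE can be dropped because the sink upserts and
    // deletes by row key, so the retraction record carries no extra information for HBase.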
@Override
public Map<String, DataType> listWritableMetadata() {
return WritableMetadata.list();
}
@Override
public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
this.metadataKeys = metadataKeys;
}
@Override
public DynamicTableSink copy() {
return new HBaseDynamicTableSink(
tableName, physicalDataType, hbaseConf, writeOptions, nullStringLiteral);
}
@Override
public String asSummaryString() {
return "HBase";
}
// -------------------------------------------------------------------------------------------
@VisibleForTesting
public HBaseTableSchema getHBaseTableSchema() {
return this.hbaseTableSchema;
}
@VisibleForTesting
public HBaseWriteOptions getWriteOptions() {
return writeOptions;
}
@VisibleForTesting
public Configuration getConfiguration() {
return this.hbaseConf;
}
@VisibleForTesting
public String getTableName() {
return this.tableName;
}
}
| 3,976 |
0 |
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache/sample/EVCacheClientZipkinTracingSample.java
|
package com.netflix.evcache.sample;
import brave.Tracing;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.EVCacheTracingEventListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import zipkin2.Span;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
public class EVCacheClientZipkinTracingSample {
private final EVCache evCache;
private final List<Span> reportedSpans;
private static boolean verboseMode = false;
/**
* Default constructor.
*
* <p>This tells the EVCache library to use the "simple node list provider" for EVCACHE_APP1 (by
* setting the relevant system property), and then it copies the EVC_SAMPLE_DEPLOYMENT environment
* variable to the EVCACHE_APP1-NODES system property.
*
   * <p>If the environment variable isn't set, the default is a single memcached server at
   * localhost:11211.
*
* <p>Finally, this initializes "evCache" using EVCache.Builder, specifying the application name
* "EVCACHE_APP1."
*/
public EVCacheClientZipkinTracingSample() {
String deploymentDescriptor = System.getenv("EVC_SAMPLE_DEPLOYMENT");
if (deploymentDescriptor == null) {
      // No deployment descriptor in the environment, use a default.
deploymentDescriptor = "SERVERGROUP1=localhost:11211";
}
System.setProperty("EVCACHE_APP1.use.simple.node.list.provider", "true");
System.setProperty("EVCACHE_APP1-NODES", deploymentDescriptor);
EVCacheClientPoolManager poolManager = EVCacheClientPoolManager.getInstance();
poolManager.initEVCache("EVCACHE_APP1");
reportedSpans = new ArrayList<>();
Tracing tracing = Tracing.newBuilder().spanReporter(reportedSpans::add).build();
EVCacheTracingEventListener tracingEventListener =
new EVCacheTracingEventListener(poolManager, tracing.tracer());
evCache = new EVCache.Builder().setAppName("EVCACHE_APP1").build();
}
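  // A sketch under stated assumptions (not part of the original sample): to ship spans to a real
  // Zipkin backend instead of the in-memory reportedSpans list, the Tracing builder could be wired
  // to an asynchronous reporter from the zipkin-reporter library, for example:
  //
  //   Tracing tracing = Tracing.newBuilder()
  //       .spanReporter(zipkin2.reporter.AsyncReporter.create(
  //           zipkin2.reporter.urlconnection.URLConnectionSender.create(
  //               "http://localhost:9411/api/v2/spans")))
  //       .build();
  //
  // The EVCacheTracingEventListener registration stays the same either way.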
/**
* Set a key in the cache.
*
* <p>See the memcached documentation for what "timeToLive" means. Zero means "never expires."
* Small integers (under some threshold) mean "expires this many seconds from now." Large integers
* mean "expires at this Unix timestamp" (seconds since 1/1/1970). Warranty expires 17-Jan 2038.
*/
public void setKey(String key, String value, int timeToLive) throws Exception {
try {
Future<Boolean>[] _future = evCache.set(key, value, timeToLive);
// Wait for all the Futures to complete.
// In "verbose" mode, show the status for each.
for (Future<Boolean> f : _future) {
boolean didSucceed = f.get();
if (verboseMode) {
System.out.println("per-shard set success code for key " + key + " is " + didSucceed);
}
}
if (!verboseMode) {
// Not verbose. Just give one line of output per "set," without a success code
System.out.println("finished setting key " + key);
}
} catch (EVCacheException e) {
e.printStackTrace();
}
}
/**
* Get the data for a key from the cache. Returns null if the key could not be retrieved, whether
* due to a cache miss or errors.
*/
public String getKey(String key) {
try {
String _response = evCache.<String>get(key);
return _response;
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
public void printZipkinSpans() {
System.out.println("--> " + reportedSpans.toString());
}
/** Main Program which does some simple sets and gets. */
public static void main(String[] args) {
// set verboseMode based on the environment variable
verboseMode = ("true".equals(System.getenv("EVCACHE_SAMPLE_VERBOSE")));
if (verboseMode) {
System.out.println("To run this sample app without using Gradle:");
System.out.println(
"java -cp "
+ System.getProperty("java.class.path")
+ " com.netflix.evcache.sample.EVCacheClientZipkinTracingSample");
}
try {
EVCacheClientZipkinTracingSample evCacheClientZipkinTracingSample =
new EVCacheClientZipkinTracingSample();
// Set ten keys to different values
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = "data_" + i;
// Set the TTL to 24 hours
int ttl = 86400;
evCacheClientZipkinTracingSample.setKey(key, value, ttl);
}
// Do a "get" for each of those same keys
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = evCacheClientZipkinTracingSample.getKey(key);
System.out.println("Get of " + key + " returned " + value);
}
// Print collected Zipkin Spans
evCacheClientZipkinTracingSample.printZipkinSpans();
} catch (Exception e) {
e.printStackTrace();
}
// We have to call System.exit() now, because some background
// threads were started without the "daemon" flag. This is
// probably a mistake somewhere, but hey, this is only a sample app.
System.exit(0);
}
}
| 3,977 |
0 |
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache/sample/EVCacheClientSample.java
|
package com.netflix.evcache.sample;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import java.util.concurrent.Future;
/**
* Created by senugula on 3/24/16.
* Updated by akpratt on 5/13/16.
*/
/**
* This standalone program demonstrates how to use EVCacheClient for
* set/get operations using memcached running on your local box.
*
* By default, this program expects there to be two memcached processes
* on the local host, on ports 11211 and 11212. They get used as two
* replicas of a single shard each.
*
* You can override this configuration by setting the environment
* variable EVC_SAMPLE_DEPLOYMENT to a string which describes your
* deployment. The format for that string is as described in the EVCache
* documentation for a simple node list provider. It would look like
* this for a two-replica deployment with two shards per replica:
*
* SERVERGROUP1=host1:port1,host2:port2;SERVERGROUP2=host3:port3,host4:port4
*/
public class EVCacheClientSample {
private final EVCache evCache;
private static boolean verboseMode = false;
/**
* Default constructor.
*
* This tells the EVCache library to use the "simple node list
* provider" for EVCACHE_APP1 (by setting the relevant system
* property), and then it copies the EVC_SAMPLE_DEPLOYMENT
* environment variable to the EVCACHE_APP1-NODES system property.
*
 * If the environment variable isn't set, the default is two memcached processes on
 * localhost, on ports 11211 and 11212, configured as two replicas with
 * one shard each.
*
* Finally, this initializes "evCache" using EVCache.Builder,
* specifying the application name "EVCACHE_APP1."
*/
public EVCacheClientSample() {
String deploymentDescriptor = System.getenv("EVC_SAMPLE_DEPLOYMENT");
if (deploymentDescriptor == null) {
// No deployment descriptor in the environment, use a default: two local
// memcached processes configured as two replicas of one shard each.
deploymentDescriptor = "SERVERGROUP1=localhost:11211;SERVERGROUP2=localhost:11212";
}
System.setProperty("EVCACHE_APP1.use.simple.node.list.provider", "true");
System.setProperty("EVCACHE_APP1-NODES", deploymentDescriptor);
evCache = new EVCache.Builder().setAppName("EVCACHE_APP1").build();
}
/**
* Set a key in the cache.
*
* See the memcached documentation for what "timeToLive" means.
* Zero means "never expires."
* Small integers (under some threshold) mean "expires this many seconds from now."
* Large integers mean "expires at this Unix timestamp" (seconds since 1/1/1970).
* Warranty expires 17-Jan 2038.
*/
public void setKey(String key, String value, int timeToLive) throws Exception {
try {
Future<Boolean>[] _future = evCache.set(key, value, timeToLive);
// Wait for all the Futures to complete.
// In "verbose" mode, show the status for each.
for (Future<Boolean> f : _future) {
boolean didSucceed = f.get();
if (verboseMode) {
System.out.println("per-shard set success code for key " + key + " is " + didSucceed);
}
}
if (!verboseMode) {
// Not verbose. Just give one line of output per "set," without a success code
System.out.println("finished setting key " + key);
}
} catch (EVCacheException e) {
e.printStackTrace();
}
}
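  /*
   * Illustrative TTL values (assumption, for clarity only), following the javadoc above:
   *   setKey("greeting", "hello", 0);        // never expires
   *   setKey("session", "abc", 300);         // expires 5 minutes from now
   *   setKey("promo", "xyz", 1893456000);    // expires at that Unix timestamp (2030-01-01 UTC)
   */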
/**
* Get the data for a key from the cache. Returns null if the key
* could not be retrieved, whether due to a cache miss or errors.
*/
public String getKey(String key) {
try {
String _response = evCache.<String>get(key);
return _response;
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
/**
* Main Program which does some simple sets and gets.
*/
public static void main(String[] args) {
// set verboseMode based on the environment variable
verboseMode = ("true".equals(System.getenv("EVCACHE_SAMPLE_VERBOSE")));
if (verboseMode) {
System.out.println("To run this sample app without using Gradle:");
System.out.println("java -cp " + System.getProperty("java.class.path") + " com.netflix.evcache.sample.EVCacheClientSample");
}
try {
EVCacheClientSample evCacheClientSample = new EVCacheClientSample();
// Set ten keys to different values
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = "data_" + i;
// Set the TTL to 24 hours
int ttl = 86400;
evCacheClientSample.setKey(key, value, ttl);
}
// Do a "get" for each of those same keys
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = evCacheClientSample.getKey(key);
System.out.println("Get of " + key + " returned " + value);
}
} catch (Exception e) {
e.printStackTrace();
}
// We have to call System.exit() now, because some background
// threads were started without the "daemon" flag. This is
// probably a mistake somewhere, but hey, this is only a sample app.
System.exit(0);
}
}
| 3,978 |
0 |
Create_ds/EVCache/evcache-zipkin-tracing/src/test/java/com/netflix
|
Create_ds/EVCache/evcache-zipkin-tracing/src/test/java/com/netflix/evcache/EVCacheTracingEventListenerUnitTests.java
|
package com.netflix.evcache;
import brave.Tracing;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import zipkin2.Span;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.*;
public class EVCacheTracingEventListenerUnitTests {
List<zipkin2.Span> reportedSpans;
EVCacheTracingEventListener tracingListener;
EVCacheClient mockEVCacheClient;
EVCacheEvent mockEVCacheEvent;
@BeforeMethod
public void resetMocks() {
mockEVCacheClient = mock(EVCacheClient.class);
when(mockEVCacheClient.getServerGroupName()).thenReturn("dummyServerGroupName");
mockEVCacheEvent = mock(EVCacheEvent.class);
when(mockEVCacheEvent.getClients()).thenReturn(Arrays.asList(mockEVCacheClient));
when(mockEVCacheEvent.getCall()).thenReturn(EVCache.Call.GET);
when(mockEVCacheEvent.getAppName()).thenReturn("dummyAppName");
when(mockEVCacheEvent.getCacheName()).thenReturn("dummyCacheName");
when(mockEVCacheEvent.getEVCacheKeys())
.thenReturn(Arrays.asList(new EVCacheKey("dummyAppName", "dummyKey", "dummyCanonicalKey", null, null, null, null)));
when(mockEVCacheEvent.getStatus()).thenReturn("success");
when(mockEVCacheEvent.getDurationInMillis()).thenReturn(1L);
when(mockEVCacheEvent.getTTL()).thenReturn(0);
when(mockEVCacheEvent.getCachedData())
.thenReturn(new CachedData(1, "dummyData".getBytes(), 255));
Map<String, Object> eventAttributes = new HashMap<>();
doAnswer(
new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] arguments = invocation.getArguments();
String key = (String) arguments[0];
Object value = arguments[1];
eventAttributes.put(key, value);
return null;
}
})
.when(mockEVCacheEvent)
.setAttribute(any(), any());
doAnswer(
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] arguments = invocation.getArguments();
String key = (String) arguments[0];
return eventAttributes.get(key);
}
})
.when(mockEVCacheEvent)
.getAttribute(any());
reportedSpans = new ArrayList<>();
Tracing tracing = Tracing.newBuilder().spanReporter(reportedSpans::add).build();
tracingListener =
new EVCacheTracingEventListener(mock(EVCacheClientPoolManager.class), tracing.tracer());
}
public void verifyCommonTags(List<zipkin2.Span> spans) {
    Assert.assertEquals(spans.size(), 1, "Number of reported spans does not match");
    zipkin2.Span span = spans.get(0);
    Assert.assertEquals(span.kind(), Span.Kind.CLIENT, "Span kind is not equal");
    Assert.assertEquals(
        span.name(), EVCacheTracingEventListener.EVCACHE_SPAN_NAME, "Span name is not equal");
Map<String, String> tags = span.tags();
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.APP_NAME), "APP_NAME tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.CACHE_NAME_PREFIX), "CACHE_NAME_PREFIX tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.CALL), "CALL tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.SERVER_GROUPS), "SERVER_GROUPS tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.CANONICAL_KEYS), "CANONICAL_KEYS tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.STATUS), "STATUS tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.LATENCY), "LATENCY tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.DATA_TTL), "DATA_TTL tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.DATA_SIZE), "DATA_SIZE tag is missing");
}
public void verifyErrorTags(List<zipkin2.Span> spans) {
zipkin2.Span span = spans.get(0);
Map<String, String> tags = span.tags();
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.ERROR), "ERROR tag is missing");
}
@Test
public void testEVCacheListenerOnComplete() {
tracingListener.onStart(mockEVCacheEvent);
tracingListener.onComplete(mockEVCacheEvent);
verifyCommonTags(reportedSpans);
}
@Test
public void testEVCacheListenerOnError() {
tracingListener.onStart(mockEVCacheEvent);
tracingListener.onError(mockEVCacheEvent, new RuntimeException("Unexpected Error"));
verifyCommonTags(reportedSpans);
verifyErrorTags(reportedSpans);
}
}
| 3,979 |
0 |
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix/evcache/EVCacheTracingTags.java
|
package com.netflix.evcache;
public class EVCacheTracingTags {
  public static final String CACHE_NAME_PREFIX = "evcache.cache_name_prefix";
  public static final String APP_NAME = "evcache.app_name";
  public static final String STATUS = "evcache.status";
  public static final String LATENCY = "evcache.latency";
  public static final String CALL = "evcache.call";
  public static final String SERVER_GROUPS = "evcache.server_groups";
  public static final String HASH_KEYS = "evcache.hash_keys";
  public static final String CANONICAL_KEYS = "evcache.canonical_keys";
  public static final String DATA_TTL = "evcache.data_ttl";
  public static final String DATA_SIZE = "evcache.data_size";
  public static final String ERROR = "evcache.error";
}
| 3,980 |
0 |
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix/evcache/EVCacheTracingEventListener.java
|
package com.netflix.evcache;
import brave.Span;
import brave.Tracer;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/** Adds tracing tags for EvCache calls. */
public class EVCacheTracingEventListener implements EVCacheEventListener {
  public static final String EVCACHE_SPAN_NAME = "evcache";
  private static final Logger logger = LoggerFactory.getLogger(EVCacheTracingEventListener.class);
  private static final String CLIENT_SPAN_ATTRIBUTE_KEY = "clientSpanAttributeKey";
private final Tracer tracer;
public EVCacheTracingEventListener(EVCacheClientPoolManager poolManager, Tracer tracer) {
poolManager.addEVCacheEventListener(this);
this.tracer = tracer;
}
@Override
public void onStart(EVCacheEvent e) {
try {
Span clientSpan =
this.tracer.nextSpan().kind(Span.Kind.CLIENT).name(EVCACHE_SPAN_NAME).start();
// Return if tracing has been disabled
if(clientSpan.isNoop()){
return;
}
String appName = e.getAppName();
this.safeTag(clientSpan, EVCacheTracingTags.APP_NAME, appName);
String cacheNamePrefix = e.getCacheName();
this.safeTag(clientSpan, EVCacheTracingTags.CACHE_NAME_PREFIX, cacheNamePrefix);
String call = e.getCall().name();
this.safeTag(clientSpan, EVCacheTracingTags.CALL, call);
/**
* Note - e.getClients() returns a list of clients associated with the EVCacheEvent.
*
       * <p>A read operation will have only 1 EVCacheClient, as reading from a single cache instance
       * is sufficient. Write operations will have the appropriate number of clients, as each client
       * attempts to write to its own cache instance.
*/
String serverGroup;
List<String> serverGroups = new ArrayList<>();
for (EVCacheClient client : e.getClients()) {
serverGroup = client.getServerGroupName();
if (StringUtils.isNotBlank(serverGroup)) {
serverGroups.add("\"" + serverGroup + "\"");
}
}
clientSpan.tag(EVCacheTracingTags.SERVER_GROUPS, serverGroups.stream().collect(Collectors.joining(",", "[", "]")));
/**
* Note - EVCache client creates a hash key if the given canonical key size exceeds 255
* characters.
*
       * <p>There have been cases where the canonical key size exceeded a few megabytes. Because the
       * caching client creates a hash of such canonical keys and optimizes their storage on the
       * cache servers, it is safe to annotate the hash key instead of the canonical key in such cases.
*/
String hashKey;
List<String> hashKeys = new ArrayList<>();
List<String> canonicalKeys = new ArrayList<>();
for (EVCacheKey keyObj : e.getEVCacheKeys()) {
hashKey = keyObj.getHashKey();
if (StringUtils.isNotBlank(hashKey)) {
hashKeys.add("\"" + hashKey + "\"");
} else {
canonicalKeys.add("\"" + keyObj.getCanonicalKey() + "\"");
}
}
if(hashKeys.size() > 0) {
this.safeTag(clientSpan, EVCacheTracingTags.HASH_KEYS,
hashKeys.stream().collect(Collectors.joining(",", "[", "]")));
}
if(canonicalKeys.size() > 0) {
this.safeTag(clientSpan, EVCacheTracingTags.CANONICAL_KEYS,
canonicalKeys.stream().collect(Collectors.joining(",", "[", "]")));
}
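      // Example outcome (illustrative): a short key such as "customer:123" is reported under
      // evcache.canonical_keys, while a key long enough for the EVCache client to hash it
      // surfaces only under evcache.hash_keys.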
/**
* Note - tracer.spanInScope(...) method stores Spans in the thread local object.
*
* <p>As EVCache write operations are asynchronous and quorum based, we are avoiding attaching
* clientSpan with tracer.spanInScope(...) method. Instead, we are storing the clientSpan as
* an object in the EVCacheEvent's attributes.
*/
e.setAttribute(CLIENT_SPAN_ATTRIBUTE_KEY, clientSpan);
} catch (Exception exception) {
logger.error("onStart exception", exception);
}
}
@Override
public void onComplete(EVCacheEvent e) {
try {
this.onFinishHelper(e, null);
} catch (Exception exception) {
logger.error("onComplete exception", exception);
}
}
@Override
public void onError(EVCacheEvent e, Throwable t) {
try {
this.onFinishHelper(e, t);
} catch (Exception exception) {
logger.error("onError exception", exception);
}
}
/**
   * onThrottle is not a trace event; it is used to decide whether to throttle. We don't want to
   * interfere, so we always return false.
*/
@Override
public boolean onThrottle(EVCacheEvent e) throws EVCacheException {
return false;
}
private void onFinishHelper(EVCacheEvent e, Throwable t) {
Object clientSpanObj = e.getAttribute(CLIENT_SPAN_ATTRIBUTE_KEY);
// Return if the previously saved Client Span is null
if (clientSpanObj == null) {
return;
}
Span clientSpan = (Span) clientSpanObj;
try {
if (t != null) {
this.safeTag(clientSpan, EVCacheTracingTags.ERROR, t.toString());
}
String status = e.getStatus();
this.safeTag(clientSpan, EVCacheTracingTags.STATUS, status);
long latency = this.getDurationInMicroseconds(e.getDurationInMillis());
clientSpan.tag(EVCacheTracingTags.LATENCY, String.valueOf(latency));
int ttl = e.getTTL();
clientSpan.tag(EVCacheTracingTags.DATA_TTL, String.valueOf(ttl));
CachedData cachedData = e.getCachedData();
if (cachedData != null) {
int cachedDataSize = cachedData.getData().length;
clientSpan.tag(EVCacheTracingTags.DATA_SIZE, String.valueOf(cachedDataSize));
}
} finally {
clientSpan.finish();
}
}
private void safeTag(Span span, String key, String value) {
if (StringUtils.isNotBlank(value)) {
span.tag(key, value);
}
}
private long getDurationInMicroseconds(long durationInMillis) {
// EVCacheEvent returns durationInMillis as -1 if endTime is not available.
if(durationInMillis == -1){
return durationInMillis;
} else {
            // Since the underlying EVCacheEvent returns the duration in milliseconds, the
            // microsecond-level precision is already lost. Multiplying by 1000 is sufficient here.
return durationInMillis * 1000;
}
}
}
| 3,981 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/Base.java
|
package com.netflix.evcache.test;
import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Appender;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.Scheduler;
@SuppressWarnings("unused")
public abstract class Base {
static {
BasicConfigurator.configure();
final Layout LAYOUT = new PatternLayout("%d{ISO8601} %-5p [%c{1}:%M:%L] %m%n");
final Appender STDOUT = new ConsoleAppender(LAYOUT, ConsoleAppender.SYSTEM_OUT);
final org.apache.log4j.Logger ROOT_LOGGER = org.apache.log4j.Logger.getRootLogger();
ROOT_LOGGER.removeAllAppenders();
ROOT_LOGGER.setLevel(Level.WARN);
ROOT_LOGGER.addAppender(STDOUT);
}
private static final Logger log = LoggerFactory.getLogger(Base.class);
protected EVCache evCache = null;
protected EVCacheClientPoolManager manager = null;
protected Properties props = null;
protected Properties getProps() {
if(props != null) return props;
props = new Properties();
initProps();
return props;
}
protected void initProps() {
String hostname = System.getenv("EC2_HOSTNAME");
if(hostname == null) {
props.setProperty("eureka.datacenter", "datacenter");//change to ndc while running on desktop
props.setProperty("eureka.validateInstanceId","false");
props.setProperty("eureka.mt.connect_timeout","1");
props.setProperty("eureka.mt.read_timeout","1");
} else {
props.setProperty("eureka.datacenter", "cloud");
props.setProperty("eureka.validateInstanceId","true");
}
props.setProperty("eureka.environment", "test");
props.setProperty("eureka.region", "us-east-1");
props.setProperty("eureka.appid", "clatency");
props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheNodeLocator", "ERROR");
props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheClientUtil", "ERROR");
}
@BeforeSuite
public void setupEnv() {
Properties props = getProps();
try {
for(Entry<Object, Object> prop : props.entrySet()) {
System.setProperty(prop.getKey().toString(), prop.getValue().toString());
}
} catch (Throwable e) {
e.printStackTrace();
log.error(e.getMessage(), e);
}
}
@AfterSuite
public void shutdown() {
manager.shutdown();
}
protected EVCache.Builder getNewBuilder() {
final EVCache.Builder evCacheBuilder = new EVCache.Builder();
if(log.isDebugEnabled()) log.debug("evCacheBuilder : " + evCacheBuilder);
return evCacheBuilder;
}
protected boolean append(int i, EVCache gCache) throws Exception {
String val = ";APP_" + i;
String key = "key_" + i;
Future<Boolean>[] status = gCache.append(key, val, 60 * 60);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("APPEND : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean appendOrAdd(int i, EVCache gCache) throws Exception {
return appendOrAdd(i, gCache, 60 * 60);
}
protected boolean appendOrAdd(int i, EVCache gCache, int ttl) throws Exception {
String val = "val_aa_" + i;
String key = "key_" + i;
EVCacheLatch latch = gCache.appendOrAdd(key, val, null, ttl, Policy.ALL_MINUS_1);
if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; Latch = " + latch);
boolean status = latch.await(2000, TimeUnit.MILLISECONDS);
if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; success = " + status);
return true;
}
public boolean add(int i, EVCache gCache) throws Exception {
//String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
String val = "val_add_"+System.currentTimeMillis();
String key = "key_" + i;
boolean status = gCache.add(key, val, null, 60 * 60);
if(log.isDebugEnabled()) log.debug("ADD : key : " + key + "; success = " + status);
return status;
}
public boolean insert(int i, EVCache gCache) throws Exception {
//String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
String val = "val_01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"+i;
String key = "key_" + i;
Future<Boolean>[] status = gCache.set(key, val, 60 * 60);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean replace(int i, EVCache gCache) throws Exception {
return replace(i, gCache, 60 * 60);
}
protected boolean replace(int i, EVCache gCache, int ttl) throws Exception {
String val = "val_replaced_" + i;
String key = "key_" + i;
EVCacheLatch status = gCache.replace(key, val, null, ttl, Policy.ALL);
boolean opStatus = status.await(1000, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled()) log.debug("REPLACE : key : " + key + "; success = " + opStatus + "; EVCacheLatch = " + status);
return status.getSuccessCount() > 0;
}
public boolean delete(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.delete(key);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean touch(int i, EVCache gCache) throws Exception {
return touch(i, gCache, 60 * 60);
}
protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.touch(key, ttl);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
@SuppressWarnings("deprecation")
protected boolean insertUsingLatch(int i, String app) throws Exception {
String val = "val_" + i;
String key = "key_" + i;
long start = System.currentTimeMillis();
final EVCacheClient[] clients = manager.getEVCacheClientPool(app).getEVCacheClientForWrite();
final EVCacheLatch latch = new EVCacheLatchImpl(EVCacheLatch.Policy.ALL, clients.length, app);
for (EVCacheClient client : clients) {
client.set(key, val, 60 * 60, latch);
}
boolean success = latch.await(1000, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled()) log.debug("SET LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec");
return success;
}
protected boolean deleteLatch(int i, String appName) throws Exception {
long start = System.currentTimeMillis();
String key = "key_" + i;
final EVCacheClient[] clients = manager.getEVCacheClientPool(appName).getEVCacheClientForWrite();
final EVCacheLatch latch = new EVCacheLatchImpl(Policy.ALL, clients.length, appName);
for (EVCacheClient client : clients) {
client.delete(key, latch);
}
latch.await(1000, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled()) log.debug("DELETE LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec" + "; Latch : " + latch);
return true;
}
public String get(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
return value;
}
public String completableFutureGet(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
gCache.<String>getAsync(key).handle((data, ex) -> {
System.out.println(data);
return data;
});
/*
String val = value.get();
if(log.isDebugEnabled()) log.debug("get : key : " + key
+ " completableFuture value = " + value
+ " actual value = " + val);
return val;
*/
return null;
}
public String getWithPolicy(int i, EVCache gCache, Policy policy) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key, null, policy);
if(log.isDebugEnabled()) log.debug("get with Policy : key : " + key + " val = " + value);
return value;
}
public String getAndTouch(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>getAndTouch(key, 60 * 60);
if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
return value;
}
public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
final Map<String, String> value = gCache.<String>getBulk(keys);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
public Map<String, String> getAsyncBulk(String keys[], EVCache gCache) throws Exception {
final CompletableFuture<Map<String, String>> value = gCache.<String>getAsyncBulk(keys);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value.get();
}
public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
public String getObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key, scheduler).toBlocking().value();
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
return value;
}
public String getAndTouchObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
String key = "key_" + i;
String value = gCache.<String>getAndTouch(key, 60 * 60, scheduler).toBlocking().value();
if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
return value;
}
class RemoteCaller implements Runnable {
EVCache gCache;
public RemoteCaller(EVCache c) {
this.gCache = c;
}
public void run() {
try {
int count = 1;
for(int i = 0; i < 100; i++) {
insert(i, gCache);
get(i, gCache);
delete(i, gCache);
}
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
}
}
| 3,982 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/SimpleEVCacheTest.java
|
package com.netflix.evcache.test;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.EVCacheSerializingTranscoder;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.SerializingTranscoder;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.schedulers.Schedulers;
import static org.testng.Assert.*;
@SuppressWarnings({"unused","deprecation"})
public class SimpleEVCacheTest extends Base {
private static final Logger log = LogManager.getLogger(SimpleEVCacheTest.class);
private static final String APP_NAME = "EVCACHE_TEST";
private static final String ALIAS_APP_NAME = "EVCACHE";
private ThreadPoolExecutor pool = null;
public static void main(String args[]) {
SimpleEVCacheTest test = new SimpleEVCacheTest();
test.setProps();
test.setupEnv();
test.testAll();
}
@BeforeSuite
public void setProps() {
BasicConfigurator.resetConfiguration();
BasicConfigurator.configure(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} [%t] %p %c %x - %m%n")));
Logger.getRootLogger().setLevel(Level.INFO);
Logger.getLogger(SimpleEVCacheTest.class).setLevel(Level.DEBUG);
Logger.getLogger(Base.class).setLevel(Level.DEBUG);
Logger.getLogger(EVCacheImpl.class).setLevel(Level.DEBUG);
Logger.getLogger(EVCacheClient.class).setLevel(Level.DEBUG);
Logger.getLogger(EVCacheClientPool.class).setLevel(Level.DEBUG);
final Properties props = getProps();
props.setProperty(APP_NAME + ".EVCacheClientPool.zoneAffinity", "false");
props.setProperty(APP_NAME + ".use.simple.node.list.provider", "true");
props.setProperty(APP_NAME + ".EVCacheClientPool.readTimeout", "1000");
props.setProperty(APP_NAME + ".EVCacheClientPool.bulkReadTimeout", "1000");
props.setProperty(APP_NAME + ".max.read.queue.length", "100");
props.setProperty(APP_NAME + ".operation.timeout", "10000");
props.setProperty(APP_NAME + ".throw.exception", "false");
        // Setting properties here for testing how we can disable aliases. If there are test cases
        // that require aliases, these properties should go under a special condition.
props.setProperty("EVCacheClientPoolManager." + APP_NAME + ".alias", ALIAS_APP_NAME);
props.setProperty("EVCacheClientPoolManager." + APP_NAME + ".ignoreAlias", "true");
// End alias properties
int maxThreads = 2;
final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(100000);
pool = new ThreadPoolExecutor(maxThreads * 4, maxThreads * 4, 30, TimeUnit.SECONDS, queue);
pool.prestartAllCoreThreads();
}
public SimpleEVCacheTest() {
}
@BeforeSuite(dependsOnMethods = { "setProps" })
public void setupClusterDetails() {
manager = EVCacheClientPoolManager.getInstance();
}
@Test public void testDisablingAlias()
{
// Ensure alias is disabled, we see "EVCACHE_TEST" instead of "EVCACHE" as we have set above.
EVCacheClientPool pool = EVCacheClientPoolManager.getInstance().getEVCacheClientPool(APP_NAME);
assertEquals(pool.getAppName(), APP_NAME);
}
public void testAll() {
try {
EVCacheClientPoolManager.getInstance().initEVCache(APP_NAME);
testDisablingAlias();
testEVCache();
int i = 1;
boolean flag = true;
while (flag) {
try {
// testAdd();
testInsert();
// testAppend();
testGet();
testGetWithPolicy();
testEVCacheTranscoder();
// testGetObservable();
// testGetAndTouch();
// testBulk();
// testBulkAndTouch();
// testAppendOrAdd();
// testCompletableFutureGet();
// testCompletableFutureBulk();
// if(i++ % 5 == 0) testDelete();
//Thread.sleep(3000);
} catch (Exception e) {
log.error(e);
}
//Thread.sleep(3000);
}
} catch (Exception e) {
log.error(e);
}
}
public void testGetForKey(String key) throws Exception {
String value = evCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
}
@BeforeSuite
public void setupEnv() {
super.setupEnv();
}
protected EVCache evCache = null;
@Test
public void testEVCache() {
this.evCache = (new EVCache.Builder()).setAppName("EVCACHE_TEST").setCachePrefix(null).enableRetry().build();
assertNotNull(evCache);
}
@Test(dependsOnMethods = { "testEVCache" })
public void testAdd() throws Exception {
for (int i = 0; i < 10; i++) {
add(i, evCache);
}
}
@Test(dependsOnMethods = { "testAdd" })
public void testInsert() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insert(i, evCache), "SET : Following Index failed - " + i + " for evcache - " + evCache);
//insert(i, evCache);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testAppend() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(append(i, evCache), "APPEND : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testAppend" })
public void testGet() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = get(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testCompletableFutureGet() throws Exception {
for (int i = 0; i < 1000; i++) {
final String val = completableFutureGet(i, evCache);
//assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetWithPolicy() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getWithPolicy(i, evCache, Policy.QUORUM);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetWithPolicy" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testCompletableFutureBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getAsyncBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[10];
for (int i = 0; i < 10; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 60 * 60);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
public void testAppendOrAdd() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(appendOrAdd(i, evCache));
}
}
@Test(dependsOnMethods = { "testBulkAndTouch" })
public void testReplace() throws Exception {
for (int i = 0; i < 10; i++) {
replace(i, evCache);
}
}
@Test(dependsOnMethods = { "testReplace" })
public void testDelete() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(delete(i, evCache), "DELETE : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testDelete" })
public void testInsertAsync() throws Exception {
for (int i = 0; i < 10; i++) {
boolean flag = insertAsync(i, evCache);
assertTrue(flag, "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testInsertAsync" })
public void testTouch() throws Exception {
for (int i = 0; i < 10; i++) {
touch(i, evCache, 1000);
String val = get(i, evCache);
assertTrue(val != null);
}
}
public boolean insertAsync(int i, EVCache gCache) throws Exception {
// String val = "This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah.This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah .This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah
// blah blah blah blah blah blah blah blah blah blah blah val_"
// + i;
String val = "val_" + i;
String key = "key_" + i;
Future<Boolean>[] statuses = gCache.set(key, val, 24 * 60 * 60);
for(Future<Boolean> status : statuses) {
assertTrue(status.get(), "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
}
pool.submit(new StatusChecker(key, statuses));
return true;
}
@Test(dependsOnMethods = { "testTouch" })
public void testInsertLatch() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insertUsingLatch(i, "EVCACHE"));
}
}
@Test(dependsOnMethods = { "testInsertLatch" })
public void testDeleteLatch() throws Exception {
for (int i = 0; i < 10; i++) {
deleteLatch(i, "EVCACHE");
}
}
public void testGetObservable() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getObservable(i, evCache, Schedulers.computation());
// Observable<String> obs = evCache.<String> observeGet(key);
// obs.doOnNext(new OnNextHandler(key)).doOnError(new OnErrorHandler(key)).subscribe();
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testEVCacheTranscoder() throws Exception {
EVCacheSerializingTranscoder evcacheTranscoder = new EVCacheSerializingTranscoder();
SerializingTranscoder serializingTranscoder = new SerializingTranscoder();
// long string to trigger compression
String val = "val_01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789";
CachedData evCachedData = evcacheTranscoder.encode(val);
CachedData serializingCachedData = serializingTranscoder.encode(val);
assertTrue(Arrays.equals(evCachedData.getData(), serializingCachedData.getData()), "cacheData same" + evCachedData.toString());
if(log.isDebugEnabled()) log.debug("EVCacheTranscoder result equal to SerializingTranscoder: " + Arrays.equals(evCachedData.getData(), serializingCachedData.getData()));
}
class StatusChecker implements Runnable {
Future<Boolean>[] status;
String key;
public StatusChecker(String key, Future<Boolean>[] status) {
this.status = status;
this.key = key;
}
public void run() {
try {
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get());
}
} catch (Exception e) {
log.error(e);
}
}
}
}
| 3,983 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/MockEVCacheTest.java
|
package com.netflix.evcache.test;
import static org.mockito.Matchers.anyCollection;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Future;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import rx.functions.Action1;
public class MockEVCacheTest {
protected EVCache evCache = null;
private static final Logger log = LoggerFactory.getLogger(MockEVCacheTest.class);
private int loops = 10;
public MockEVCacheTest() {
}
@Test
public void testEVCache() {
this.evCache = new DummyEVCacheImpl().getDummyCache();
assertNotNull(evCache);
}
public boolean insert(int i, EVCache gCache) throws Exception {
String val = "val_"+i;
String key = "key_" + i;
Future<Boolean>[] status = gCache.set(key, val, 24 * 60 * 60);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
public boolean delete(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.delete(key);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean touch(int i, EVCache gCache) throws Exception {
return touch(i, gCache, 24 * 60 * 60);
}
protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.touch(key, ttl);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
public String get(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
return value;
}
public String getAndTouch(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>getAndTouch(key, 24 * 60 * 60);
if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
return value;
}
public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
final Map<String, String> value = gCache.<String>getBulk(keys);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
@Test(dependsOnMethods = { "testEVCache" })
public void testInsert() throws Exception {
for (int i = 0; i < loops; i++) {
assertTrue(insert(i, evCache));
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testGet() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = get(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[loops];
for (int i = 0; i < loops; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertNotNull(vals);
for (int i = 0; i < keys.length; i++) {
String key = keys[i];
String val = vals.get(key);
if (log.isDebugEnabled()) log.debug("key " + key + " returned val " + val);
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[loops];
for (int i = 0; i < loops; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 24 * 60 * 60);
assertNotNull(vals);
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
if (val == null) {
if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
} else {
assertTrue(val.equals("val_" + i));
}
}
}
@Test(dependsOnMethods = { "testBulkAndTouch" })
public void testDelete() throws Exception {
for (int i = 0; i < loops; i++) {
delete(i, evCache);
}
}
public void onComplete(EVCacheOperationFuture<String> future) throws Exception {
if (log.isDebugEnabled()) log.debug("getl : key : " + future.getKey() + ", val = " + future.get());
}
static class OnErrorHandler implements Action1<Throwable> {
private final String key;
public OnErrorHandler(String key) {
this.key = key;
}
@Override
public void call(Throwable t1) {
if (log.isDebugEnabled()) log.debug("Could not get value for key: " + key + "; Exception is ", t1);
}
}
static class OnNextHandler implements Action1<String> {
private final String key;
public OnNextHandler(String key) {
this.key = key;
}
@Override
public void call(String val) {
if (log.isDebugEnabled()) log.debug("Observable : key " + key + "; val = " + val);
}
}
/**
     * Dummy cache used for debugging purposes (a simple way to disable the cache)
*/
private static class DummyEVCacheImpl {
private final EVCache cache;
@SuppressWarnings("unchecked")
public DummyEVCacheImpl() {
cache = mock(EVCache.class);
try {
when(cache.set(anyString(), anyObject(), anyInt())).thenReturn(new Future[0]);
when(cache.get(anyString())).thenReturn("");
when(cache.getAndTouch(anyString(), anyInt())).thenReturn("");
when(cache.getBulk(anyCollection())).thenReturn(Collections.emptyMap());
when(cache.delete(anyString())).thenReturn(new Future[0]);
} catch (EVCacheException e) {
log.error("Unable to create mock EVCache", e);
}
}
public EVCache getDummyCache() {
return cache;
}
}
}
| 3,984 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/SimpleEurekaEVCacheTest.java
|
package com.netflix.evcache.test;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.schedulers.Schedulers;
@SuppressWarnings({"unused","deprecation"})
public class SimpleEurekaEVCacheTest extends Base {
private static final Logger log = LoggerFactory.getLogger(SimpleEurekaEVCacheTest.class);
private ThreadPoolExecutor pool = null;
public static void main(String args[]) {
SimpleEurekaEVCacheTest test = new SimpleEurekaEVCacheTest();
test.setProps();
test.testAll();
}
@BeforeSuite
public void setProps() {
org.apache.log4j.Logger.getLogger(SimpleEurekaEVCacheTest.class).setLevel(Level.DEBUG);
org.apache.log4j.Logger.getLogger(Base.class).setLevel(Level.DEBUG);
org.apache.log4j.Logger.getLogger(EVCacheImpl.class).setLevel(Level.ERROR);
org.apache.log4j.Logger.getLogger(EVCacheClient.class).setLevel(Level.ERROR);
org.apache.log4j.Logger.getLogger(EVCacheClientPool.class).setLevel(Level.ERROR);
System.setProperty("evcache.use.simple.node.list.provider", "true");
System.setProperty("EVCACHE_AB.EVCacheClientPool.readTimeout", "100000");
System.setProperty("EVCACHE_AB.EVCacheClientPool.bulkReadTimeout", "10000");
System.setProperty("EVCACHE_AB.max.read.queue.length", "100");
System.setProperty("EVCACHE_AB.operation.timeout", "10000");
System.setProperty("EVCACHE_AB.throw.exception", "false");
System.setProperty("EVCACHE_AB.chunk.data", "false");
System.setProperty("NETFLIX_ENVIRONMENT", "test");
System.setProperty("EC2_REGION", "us-east-1");
System.setProperty("evcache.thread.daemon", "true");
int maxThreads = 2;
final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(100000);
pool = new ThreadPoolExecutor(maxThreads * 4, maxThreads * 4, 30, TimeUnit.SECONDS, queue);
pool.prestartAllCoreThreads();
}
public SimpleEurekaEVCacheTest() {
}
@BeforeSuite(dependsOnMethods = { "setProps" })
public void setupClusterDetails() {
manager = EVCacheClientPoolManager.getInstance();
}
public void testAll() {
try {
setupClusterDetails();
EVCacheClientPoolManager.getInstance().initEVCache("EVCACHE_AB");
testEVCache();
int i = 1;
boolean flag = true;
while (flag) {
try {
testAdd();
testInsert();
testInsertAsync();
//// testAppend();
testGet();
testGetObservable();
testGetAndTouch();
testBulk();
testBulkAndTouch();
testAppendOrAdd();
testCompletableFutureGet();
testCompletableFutureBulk();
if(i++ % 5 == 0) testDelete();
Thread.sleep(1000);
if (i > 100) break;
} catch (Exception e) {
log.error("Exception", e);
}
//Thread.sleep(3000);
}
Thread.sleep(100);
} catch (Exception e) {
log.error("Exception", e);
}
shutdown();
}
public void testGetForKey(String key) throws Exception {
String value = evCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
}
@BeforeSuite
public void setupEnv() {
}
protected EVCache evCache = null;
@Test
public void testEVCache() {
this.evCache = (new EVCache.Builder()).setAppName("EVCACHE_AB").setCachePrefix(null).enableRetry().build();
assertNotNull(evCache);
}
@Test(dependsOnMethods = { "testEVCache" })
public void testAdd() throws Exception {
for (int i = 0; i < 10; i++) {
add(i, evCache);
}
}
@Test(dependsOnMethods = { "testAdd" })
public void testInsert() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insert(i, evCache), "SET : Following Index failed - " + i + " for evcache - " + evCache);
insert(i, evCache);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testAppend() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(append(i, evCache), "APPEND : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testAppend" })
public void testGet() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = get(i, evCache);
// assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testCompletableFutureGet() throws Exception {
for (int i = 0; i < 1000; i++) {
final String val = completableFutureGet(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testCompletableFutureBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getAsyncBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[10];
for (int i = 0; i < 10; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 60 * 60);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
public void testAppendOrAdd() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(appendOrAdd(i, evCache));
}
}
@Test(dependsOnMethods = { "testBulkAndTouch" })
public void testReplace() throws Exception {
for (int i = 0; i < 10; i++) {
replace(i, evCache);
}
}
@Test(dependsOnMethods = { "testReplace" })
public void testDelete() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(delete(i, evCache), "DELETE : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testDelete" })
public void testInsertAsync() throws Exception {
for (int i = 0; i < 10; i++) {
boolean flag = insertAsync(i, evCache);
if(log.isDebugEnabled()) log.debug("SET : async : i: " + i + " flag = " + flag);
assertTrue(flag, "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testInsertAsync" })
public void testTouch() throws Exception {
for (int i = 0; i < 10; i++) {
touch(i, evCache, 1000);
String val = get(i, evCache);
assertTrue(val != null);
}
}
public boolean insertAsync(int i, EVCache gCache) throws Exception {
// String val = "This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah.This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah .This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah
// blah blah blah blah blah blah blah blah blah blah blah val_"
// + i;
String val = "val_" + i;
String key = "key_" + i;
Future<Boolean>[] statuses = gCache.set(key, val, 24 * 60 * 60);
// for(Future<Boolean> status : statuses) {
// assertTrue(status.get(), "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
// }
// pool.submit(new StatusChecker(key, statuses));
return true;
}
@Test(dependsOnMethods = { "testTouch" })
public void testInsertLatch() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insertUsingLatch(i, "EVCACHE"));
}
}
@Test(dependsOnMethods = { "testInsertLatch" })
public void testDeleteLatch() throws Exception {
for (int i = 0; i < 10; i++) {
deleteLatch(i, "EVCACHE");
}
}
public void testGetObservable() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getObservable(i, evCache, Schedulers.computation());
// Observable<String> obs = evCache.<String> observeGet(key);
// obs.doOnNext(new OnNextHandler(key)).doOnError(new OnErrorHandler(key)).subscribe();
}
}
class StatusChecker implements Runnable {
Future<Boolean>[] status;
String key;
public StatusChecker(String key, Future<Boolean>[] status) {
this.status = status;
this.key = key;
}
public void run() {
try {
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get());
}
} catch (Exception e) {
log.error("Exception", e);
}
}
}
@AfterSuite
public void shutdown() {
pool.shutdown();
super.shutdown();
}
}
| 3,985 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheConnection.java
|
package net.spy.memcached;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedSelectorException;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.spy.memcached.ops.Operation;
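/**
 * A {@link MemcachedConnection} that tracks per-node operation counts on EVCache nodes,
 * shuts those nodes down along with the connection, and keeps the IO loop alive by
 * logging and swallowing transient selector/connection exceptions.
 */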
public class EVCacheConnection extends MemcachedConnection {
private static final Logger log = LoggerFactory.getLogger(EVCacheConnection.class);
public EVCacheConnection(String name, int bufSize, ConnectionFactory f,
List<InetSocketAddress> a, Collection<ConnectionObserver> obs,
FailureMode fm, OperationFactory opfactory) throws IOException {
super(bufSize, f, a, obs, fm, opfactory);
setName(name);
}
@Override
public void shutdown() throws IOException {
try {
super.shutdown();
for (MemcachedNode qa : getLocator().getAll()) {
if (qa instanceof EVCacheNode) {
((EVCacheNode) qa).shutdown();
}
}
} finally {
if(running) {
running = false;
if(log.isWarnEnabled()) log.warn("Forceful shutdown by interrupting the thread.", new Exception());
interrupt();
}
}
}
public void run() {
while (running) {
try {
handleIO();
} catch (IOException e) {
if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
} catch (CancelledKeyException e) {
if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
} catch (ClosedSelectorException e) {
if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
} catch (IllegalStateException e) {
if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
} catch (ConcurrentModificationException e) {
if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
} catch (Throwable e) {
log.error("SEVERE EVCACHE ISSUE.", e);// This ensures the thread
// doesn't die
}
}
if (log.isDebugEnabled()) log.debug(toString() + " : Shutdown");
}
public String toString() {
return super.toString();
}
protected void addOperation(final MemcachedNode node, final Operation o) {
super.addOperation(node, o);
((EVCacheNode) node).incrOps();
}
@Override
public void addOperations(Map<MemcachedNode, Operation> ops) {
super.addOperations(ops);
for (MemcachedNode node : ops.keySet()) {
((EVCacheNode) node).incrOps();
}
}
@Override
public void enqueueOperation(final String key, final Operation o) {
checkState();
addOperation(key, o);
}
@Override
public CountDownLatch broadcastOperation(BroadcastOpFactory of, Collection<MemcachedNode> nodes) {
for (MemcachedNode node : nodes) {
((EVCacheNode) node).incrOps();
}
return super.broadcastOperation(of, nodes);
}
}
| 3,986 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheNodeMBean.java
|
package net.spy.memcached;
public interface EVCacheNodeMBean extends EVCacheNode {
}
| 3,987 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheMemcachedClient.java
|
package net.spy.memcached;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheGetOperationListener;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.operation.EVCacheAsciiOperationFactory;
import com.netflix.evcache.operation.EVCacheBulkGetFuture;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.ipc.IpcStatus;
import net.spy.memcached.internal.GetFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.ops.ConcatenationType;
import net.spy.memcached.ops.DeleteOperation;
import net.spy.memcached.ops.GetAndTouchOperation;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatsOperation;
import net.spy.memcached.ops.StatusCode;
import net.spy.memcached.ops.StoreOperation;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.protocol.binary.BinaryOperationFactory;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.StringUtils;
import net.spy.memcached.protocol.ascii.ExecCmdOperation;
import net.spy.memcached.protocol.ascii.MetaDebugOperation;
import net.spy.memcached.protocol.ascii.MetaGetOperation;
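/**
 * A {@link MemcachedClient} extension that wires EVCache specifics into the spymemcached
 * operation pipeline: EVCache futures and latches, Spectator timers and distribution
 * summaries per operation, wrong-key detection, and ASCII meta commands (meta get /
 * meta debug) when an {@link EVCacheAsciiOperationFactory} is in use.
 */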
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS",
"SIC_INNER_SHOULD_BE_STATIC_ANON" })
public class EVCacheMemcachedClient extends MemcachedClient {
private static final Logger log = LoggerFactory.getLogger(EVCacheMemcachedClient.class);
private final String appName;
private final Property<Integer> readTimeout;
private final EVCacheClient client;
private final Map<String, Timer> timerMap = new ConcurrentHashMap<String, Timer>();
private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>();
private Property<Long> mutateOperationTimeout;
private final ConnectionFactory connectionFactory;
private final Property<Integer> maxReadDuration, maxWriteDuration;
private final Property<Boolean> enableDebugLogsOnWrongKey;
public EVCacheMemcachedClient(ConnectionFactory cf, List<InetSocketAddress> addrs,
Property<Integer> readTimeout, EVCacheClient client) throws IOException {
super(cf, addrs);
this.connectionFactory = cf;
this.readTimeout = readTimeout;
this.client = client;
this.appName = client.getAppName();
this.maxWriteDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50);
this.maxReadDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".max.read.duration.metric", Integer.class).orElseGet("evcache.max.read.duration.metric").orElse(20);
this.enableDebugLogsOnWrongKey = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".enable.debug.logs.on.wrongkey", Boolean.class).orElse(false);
}
public NodeLocator getNodeLocator() {
return this.mconn.getLocator();
}
public MemcachedNode getEVCacheNode(String key) {
return this.mconn.getLocator().getPrimary(key);
}
public <T> GetFuture<T> asyncGet(final String key, final Transcoder<T> tc) {
throw new UnsupportedOperationException("asyncGet");
}
// Returns 'true' if keys don't match and logs & reports the error.
// Returns 'false' if keys match.
// TODO: Consider removing this code once we've fixed the Wrong key bug(s)
private boolean isWrongKeyReturned(String original_key, String returned_key) {
if (!original_key.equals(returned_key)) {
// If the keys don't match, log the error along with the key-owning host's information and a stack trace.
final String original_host = getHostNameByKey(original_key);
final String returned_host = getHostNameByKey(returned_key);
log.error("Wrong key returned. Key - " + original_key + " (Host: " + original_host + ") ; Returned Key "
+ returned_key + " (Host: " + returned_host + ")", new Exception());
client.reportWrongKeyReturned(original_host);
// If we are configured to dynamically switch log levels to DEBUG on a wrong key error, do so here.
if (enableDebugLogsOnWrongKey.get()) {
System.setProperty("log4j.logger.net.spy.memcached", "DEBUG");
}
return true;
}
return false;
}
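/**
 * Asynchronous single-key get. The returned future completes when the underlying
 * GetOperation finishes; decoding is handed off to the transcode service and the
 * elapsed time is recorded against the GET operation timer.
 */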
public <T> EVCacheOperationFuture<T> asyncGet(final String key, final Transcoder<T> tc, EVCacheGetOperationListener<T> listener) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<T> rv = new EVCacheOperationFuture<T>(key, latch, new AtomicReference<T>(null), readTimeout.get().intValue(), executorService, client);
final Operation op = opFact.get(key, new GetOperation.Callback() {
private Future<T> val = null;
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("Getting Key : " + key + "; Status : " + status.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "")
+ "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
try {
if (val != null) {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val : " + val.get());
rv.set(val.get(), status);
} else {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val is null");
rv.set(null, status);
}
} catch (Exception e) {
log.error(e.getMessage(), e);
rv.set(null, status);
}
}
@SuppressWarnings("unchecked")
public void gotData(String k, int flags, byte[] data) {
if (isWrongKeyReturned(key, k)) return;
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Read data : key " + key + "; flags : " + flags + "; data : " + data);
if (data != null) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + key + "; val size : " + data.length);
getDataSizeDistributionSummary(EVCacheMetricsFactory.GET_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
if (tc == null) {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
final Transcoder<T> t = (Transcoder<T>) getTranscoder();
val = tcService.decode(t, new CachedData(flags, data, t.getMaxSize()));
}
} else {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
val = tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize()));
}
}
} else {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + key + "; val is null" );
}
}
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.GET_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (val != null ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), host, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (listener != null) rv.addListener(listener);
mconn.enqueueOperation(key, op);
return rv;
}
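/**
 * Bulk get: keys are grouped by the primary node that owns them, one multi-get
 * operation is issued per node, and a shared callback counts down the latch once
 * all per-node chunks have completed.
 */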
public <T> EVCacheBulkGetFuture<T> asyncGetBulk(Collection<String> keys,
final Transcoder<T> tc,
EVCacheGetOperationListener<T> listener) {
final Map<String, Future<T>> m = new ConcurrentHashMap<String, Future<T>>();
// Break the gets down into per-node chunks, keyed by the primary node that owns each key
final Map<MemcachedNode, Collection<String>> chunks = new HashMap<MemcachedNode, Collection<String>>();
final NodeLocator locator = mconn.getLocator();
//Populate Node and key Map
for (String key : keys) {
StringUtils.validateKey(key, opFact instanceof BinaryOperationFactory);
final MemcachedNode primaryNode = locator.getPrimary(key);
if (primaryNode.isActive()) {
Collection<String> ks = chunks.computeIfAbsent(primaryNode, k -> new ArrayList<>());
ks.add(key);
}
}
final AtomicInteger pendingChunks = new AtomicInteger(chunks.size());
int initialLatchCount = chunks.isEmpty() ? 0 : 1;
final CountDownLatch latch = new CountDownLatch(initialLatchCount);
final Collection<Operation> ops = new ArrayList<Operation>(chunks.size());
final EVCacheBulkGetFuture<T> rv = new EVCacheBulkGetFuture<T>(m, ops, latch, executorService, client);
GetOperation.Callback cb = new GetOperation.Callback() {
@Override
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("GetBulk Keys : " + keys + "; Status : " + status.getStatusCode().name() + "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
rv.setStatus(status);
}
@Override
public void gotData(String k, int flags, byte[] data) {
if (data != null) {
getDataSizeDistributionSummary(EVCacheMetricsFactory.BULK_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
}
m.put(k, tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize())));
}
@Override
public void complete() {
if (pendingChunks.decrementAndGet() <= 0) {
latch.countDown();
getTimer(EVCacheMetricsFactory.BULK_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (m.size() == keys.size() ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), null, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
}
};
// Now that we know how many nodes the keys break down into, and the latch
// is set up, convert each node's collection of keys into a single get operation
final Map<MemcachedNode, Operation> mops = new HashMap<MemcachedNode, Operation>();
for (Map.Entry<MemcachedNode, Collection<String>> me : chunks.entrySet()) {
Operation op = opFact.get(me.getValue(), cb);
mops.put(me.getKey(), op);
ops.add(op);
}
assert mops.size() == chunks.size();
mconn.checkState();
mconn.addOperations(mops);
return rv;
}
public <T> EVCacheOperationFuture<CASValue<T>> asyncGetAndTouch(final String key, final int exp, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<CASValue<T>> rv = new EVCacheOperationFuture<CASValue<T>>(key, latch, new AtomicReference<CASValue<T>>(null), operationTimeout, executorService, client);
Operation op = opFact.getAndTouch(key, exp, new GetAndTouchOperation.Callback() {
private CASValue<T> val = null;
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("GetAndTouch Key : " + key + "; Status : " + status.getStatusCode().name()
+ (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "")
+ "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
rv.set(val, status);
}
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.GET_AND_TOUCH_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (val != null ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), host, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
public void gotData(String k, int flags, long cas, byte[] data) {
if (isWrongKeyReturned(key, k)) return;
if (data != null) getDataSizeDistributionSummary(EVCacheMetricsFactory.GET_AND_TOUCH_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
val = new CASValue<T>(cas, tc.decode(new CachedData(flags, data, tc.getMaxSize())));
}
});
rv.setOperation(op);
mconn.enqueueOperation(key, op);
return rv;
}
public <T> OperationFuture<Boolean> set(String key, int exp, T o, final Transcoder<T> tc) {
return asyncStore(StoreType.set, key, exp, o, tc, null);
}
public OperationFuture<Boolean> set(String key, int exp, Object o) {
return asyncStore(StoreType.set, key, exp, o, transcoder, null);
}
@SuppressWarnings("unchecked")
public <T> OperationFuture<Boolean> set(String key, int exp, T o, final Transcoder<T> tc, EVCacheLatch latch) {
Transcoder<T> t = (Transcoder<T>) ((tc == null) ? transcoder : tc);
return asyncStore(StoreType.set, key, exp, o, t, latch);
}
@SuppressWarnings("unchecked")
public <T> OperationFuture<Boolean> replace(String key, int exp, T o, final Transcoder<T> tc, EVCacheLatch latch) {
Transcoder<T> t = (Transcoder<T>) ((tc == null) ? transcoder : tc);
return asyncStore(StoreType.replace, key, exp, o, t, latch);
}
public <T> OperationFuture<Boolean> add(String key, int exp, T o, Transcoder<T> tc) {
return asyncStore(StoreType.add, key, exp, o, tc, null);
}
public OperationFuture<Boolean> delete(String key, EVCacheLatch evcacheLatch) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final DeleteOperation op = opFact.delete(key, new DeleteOperation.Callback() {
@Override
public void receivedStatus(OperationStatus status) {
rv.set(Boolean.TRUE, status);
}
@Override
public void gotData(long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.DELETE_OPERATION, EVCacheMetricsFactory.WRITE, rv.getStatus(), null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
mconn.enqueueOperation(key, op);
return rv;
}
public <T> OperationFuture<Boolean> touch(final String key, final int exp, EVCacheLatch evcacheLatch) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final Operation op = opFact.touch(key, exp, new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
rv.set(status.isSuccess(), status);
}
@Override
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.TOUCH_OPERATION, EVCacheMetricsFactory.WRITE, rv.getStatus(), null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
mconn.enqueueOperation(key, op);
return rv;
}
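/**
 * Append-or-add: first attempts an append; if that fails, falls back to an add, and if
 * the add also fails (for example because another writer created the key in between),
 * retries the append once more before completing the future.
 */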
public <T> OperationFuture<Boolean> asyncAppendOrAdd(final String key, int exp, CachedData co, EVCacheLatch evcacheLatch) {
final CountDownLatch latch = new CountDownLatch(1);
if(co != null && co.getData() != null) getDataSizeDistributionSummary(EVCacheMetricsFactory.AOA_OPERATION, EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.IPC_SIZE_OUTBOUND).record(co.getData().length);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final Operation opAppend = opFact.cat(ConcatenationType.append, 0, key, co.getData(), new OperationCallback() {
boolean appendSuccess = false;
@Override
public void receivedStatus(OperationStatus val) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("AddOrAppend Key (Append Operation): " + key + "; Status : " + val.getStatusCode().name()
+ "; Message : " + val.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
if (val.getStatusCode().equals(StatusCode.SUCCESS)) {
rv.set(Boolean.TRUE, val);
appendSuccess = true;
}
}
@Override
public void complete() {
if(appendSuccess) {
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.AOA_OPERATION_APPEND, EVCacheMetricsFactory.WRITE, rv.getStatus(), EVCacheMetricsFactory.YES, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
latch.countDown();
rv.signalComplete();
} else {
Operation opAdd = opFact.store(StoreType.add, key, co.getFlags(), exp, co.getData(), new StoreOperation.Callback() {
@Override
public void receivedStatus(OperationStatus addStatus) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("AddOrAppend Key (Add Operation): " + key + "; Status : " + addStatus.getStatusCode().name()
+ "; Message : " + addStatus.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
if(addStatus.isSuccess()) {
appendSuccess = true;
rv.set(addStatus.isSuccess(), addStatus);
} else {
Operation opReappend = opFact.cat(ConcatenationType.append, 0, key, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus retryAppendStatus) {
if (retryAppendStatus.getStatusCode().equals(StatusCode.SUCCESS)) {
rv.set(Boolean.TRUE, retryAppendStatus);
if (log.isDebugEnabled()) log.debug("AddOrAppend Retry append Key (Append Operation): " + key + "; Status : " + retryAppendStatus.getStatusCode().name()
+ "; Message : " + retryAppendStatus.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
} else {
rv.set(Boolean.FALSE, retryAppendStatus);
}
}
public void complete() {
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.AOA_OPERATION_REAPPEND, EVCacheMetricsFactory.WRITE, rv.getStatus(), EVCacheMetricsFactory.YES, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(opReappend);
mconn.enqueueOperation(key, opReappend);
}
}
@Override
public void gotData(String key, long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
if(appendSuccess) {
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.AOA_OPERATION_ADD, EVCacheMetricsFactory.WRITE, rv.getStatus(), EVCacheMetricsFactory.YES, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
latch.countDown();
rv.signalComplete();
}
}
});
rv.setOperation(opAdd);
mconn.enqueueOperation(key, opAdd);
}
}
});
rv.setOperation(opAppend);
mconn.enqueueOperation(key, opAppend);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
return rv;
}
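/*
 * Timers are cached per operation/status/hit combination so that the Spectator tag list
 * is built only once per distinct metric name.
 */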
private Timer getTimer(String operation, String operationType, OperationStatus status, String hit, String host, long maxDuration) {
String name = ((status != null) ? operation + status.getMessage() : operation );
if(hit != null) name = name + hit;
Timer timer = timerMap.get(name);
if(timer != null) return timer;
final List<Tag> tagList = new ArrayList<Tag>(client.getTagList().size() + 4 + (host == null ? 0 : 1));
tagList.addAll(client.getTagList());
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
if(status != null) {
if(status.getStatusCode() == StatusCode.SUCCESS || status.getStatusCode() == StatusCode.ERR_NOT_FOUND || status.getStatusCode() == StatusCode.ERR_EXISTS) {
tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, EVCacheMetricsFactory.SUCCESS));
} else {
tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, EVCacheMetricsFactory.FAIL));
}
tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_STATUS, getStatusCode(status.getStatusCode())));
}
if(hit != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE_HIT, hit));
if(host != null) tagList.add(new BasicTag(EVCacheMetricsFactory.FAILED_HOST, host));
timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.IPC_CALL, tagList, Duration.ofMillis(maxDuration));
timerMap.put(name, timer);
return timer;
}
private String getStatusCode(StatusCode sc) {
return EVCacheMetricsFactory.getInstance().getStatusCode(sc);
}
private DistributionSummary getDataSizeDistributionSummary(String operation, String type, String metric) {
DistributionSummary distributionSummary = distributionSummaryMap.get(operation);
if(distributionSummary != null) return distributionSummary;
final List<Tag> tagList = new ArrayList<Tag>(6);
tagList.addAll(client.getTagList());
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, type));
distributionSummary = EVCacheMetricsFactory.getInstance().getDistributionSummary(metric, tagList);
distributionSummaryMap.put(operation, distributionSummary);
return distributionSummary;
}
private <T> OperationFuture<Boolean> asyncStore(final StoreType storeType, final String key, int exp, T value, Transcoder<T> tc, EVCacheLatch evcacheLatch) {
final CachedData co;
if (value instanceof CachedData) {
co = (CachedData) value;
} else {
co = tc.encode(value);
}
final CountDownLatch latch = new CountDownLatch(1);
final String operationStr;
if (storeType == StoreType.set) {
operationStr = EVCacheMetricsFactory.SET_OPERATION;
} else if (storeType == StoreType.add) {
operationStr = EVCacheMetricsFactory.ADD_OPERATION;
} else {
operationStr = EVCacheMetricsFactory.REPLACE_OPERATION;
}
if(co != null && co.getData() != null) getDataSizeDistributionSummary(operationStr, EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.IPC_SIZE_OUTBOUND).record(co.getData().length);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final Operation op = opFact.store(storeType, key, co.getFlags(), exp, co.getData(), new StoreOperation.Callback() {
@Override
public void receivedStatus(OperationStatus val) {
if (log.isDebugEnabled()) log.debug("Storing Key : " + key + "; Status : " + val.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "") + "; Message : " + val.getMessage()
+ "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
rv.set(val.isSuccess(), val);
if (log.isTraceEnabled() && !val.getStatusCode().equals(StatusCode.SUCCESS)) log.trace(val.getStatusCode().name() + " storing Key : " + key , new Exception());
}
@Override
public void gotData(String key, long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
latch.countDown();
final String host = (((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) || rv.getStatus().getStatusCode().equals(StatusCode.ERR_NO_MEM)) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(operationStr, EVCacheMetricsFactory.WRITE, rv.getStatus(), null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
mconn.enqueueOperation(key, op);
return rv;
}
public String toString() {
return appName + "-" + client.getZone() + "-" + client.getId();
}
@SuppressWarnings("unchecked")
public <T> OperationFuture<Boolean> add(String key, int exp, T o, final Transcoder<T> tc, EVCacheLatch latch) {
Transcoder<T> t = (Transcoder<T>) ((tc == null) ? transcoder : tc);
return asyncStore(StoreType.add, key, exp, o, t, latch);
}
public long incr(String key, long by, long def, int exp) {
return mutate(Mutator.incr, key, by, def, exp);
}
public long decr(String key, long by, long def, int exp) {
return mutate(Mutator.decr, key, by, def, exp);
}
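/*
 * Incr/decr are funnelled through this blocking mutate helper; it waits up to the
 * configured mutate timeout and falls back to -1 when the operation does not complete
 * in time.
 */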
public long mutate(final Mutator m, String key, long by, long def, int exp) {
final String operationStr = m.name();
final long start = System.currentTimeMillis();
final AtomicLong rv = new AtomicLong();
final CountDownLatch latch = new CountDownLatch(1);
final List<OperationStatus> statusList = new ArrayList<OperationStatus>(1);
final Operation op = opFact.mutate(m, key, by, def, exp, new OperationCallback() {
@Override
public void receivedStatus(OperationStatus s) {
statusList.add(s);
rv.set(Long.parseLong(s.isSuccess() ? s.getMessage() : "-1"));
}
@Override
public void complete() {
latch.countDown();
}
});
mconn.enqueueOperation(key, op);
long retVal = def;
try {
if(mutateOperationTimeout == null) {
mutateOperationTimeout = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".mutate.timeout", Long.class).orElse(connectionFactory.getOperationTimeout());
}
if (!latch.await(mutateOperationTimeout.get(), TimeUnit.MILLISECONDS)) {
if (log.isDebugEnabled()) log.debug("Mutation operation timeout. Will return -1");
retVal = -1;
} else {
retVal = rv.get();
}
} catch (Exception e) {
log.error("Exception on mutate operation : " + operationStr + " Key : " + key + "; by : " + by + "; default : " + def + "; exp : " + exp
+ "; val : " + retVal + "; Elapsed Time - " + (System.currentTimeMillis() - start), e);
}
final OperationStatus status = statusList.size() > 0 ? statusList.get(0) : null;
final String host = ((status != null && status.getStatusCode().equals(StatusCode.TIMEDOUT) && op != null) ? getHostName(op.getHandlingNode().getSocketAddress()) : null);
getTimer(operationStr, EVCacheMetricsFactory.WRITE, status, null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - start), TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug(operationStr + " Key : " + key + "; by : " + by + "; default : " + def + "; exp : " + exp
+ "; val : " + retVal + "; Elapsed Time - " + (System.currentTimeMillis() - start));
return retVal;
}
public void reconnectNode(EVCacheNode evcNode ) {
final long upTime = System.currentTimeMillis() - evcNode.getCreateTime();
if (log.isDebugEnabled()) log.debug("Reconnecting node : " + evcNode + "; UpTime : " + upTime);
if(upTime > 30000) { //not more than once every 30 seconds : TODO make this configurable
final List<Tag> tagList = new ArrayList<Tag>(client.getTagList().size() + 2);
tagList.addAll(client.getTagList());
tagList.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.RECONNECT));
tagList.add(new BasicTag(EVCacheMetricsFactory.FAILED_HOST, evcNode.getHostName()));
EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.CONFIG, tagList);
evcNode.setConnectTime(System.currentTimeMillis());
mconn.queueReconnect(evcNode);
}
}
public int getWriteMetricMaxValue() {
return maxWriteDuration.get().intValue();
}
public int getReadMetricMaxValue() {
return maxReadDuration.get().intValue();
}
private String getHostNameByKey(String key) {
MemcachedNode evcNode = getEVCacheNode(key);
return getHostName(evcNode.getSocketAddress());
}
private String getHostName(SocketAddress sa) {
if (sa == null) return null;
if(sa instanceof InetSocketAddress) {
return ((InetSocketAddress)sa).getHostName();
} else {
return sa.toString();
}
}
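/*
 * Meta debug: only supported with the ASCII operation factory. The returned key/value
 * pairs (exp, la, cas, fetch, cls, size) are mapped onto an EVCacheItemMetaData; an
 * empty object is returned if the operation times out.
 */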
public EVCacheItemMetaData metaDebug(String key) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheItemMetaData rv = new EVCacheItemMetaData();
if(opFact instanceof EVCacheAsciiOperationFactory) {
final Operation op = ((EVCacheAsciiOperationFactory)opFact).metaDebug(key, new MetaDebugOperation.Callback() {
public void receivedStatus(OperationStatus status) {
if (!status.isSuccess()) {
if (log.isDebugEnabled()) log.debug("Unsuccessful stat fetch: %s", status);
}
if (log.isDebugEnabled()) log.debug("Getting Meta Debug: " + key + "; Status : " + status.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "") + "; Message : " + status.getMessage());
}
public void debugInfo(String k, String val) {
if (log.isDebugEnabled()) log.debug("key " + k + "; val : " + val);
if(k.equals("exp")) rv.setSecondsLeftToExpire(Long.parseLong(val) * -1);
else if(k.equals("la")) rv.setSecondsSinceLastAccess(Long.parseLong(val));
else if(k.equals("cas")) rv.setCas(Long.parseLong(val));
else if(k.equals("fetch")) rv.setHasBeenFetchedAfterWrite(Boolean.parseBoolean(val));
else if(k.equals("cls")) rv.setSlabClass(Integer.parseInt(val));
else if(k.equals("size")) rv.setSizeInBytes(Integer.parseInt(val));
}
public void complete() {
latch.countDown();
}});
mconn.enqueueOperation(key, op);
try {
if (!latch.await(operationTimeout, TimeUnit.MILLISECONDS)) {
if (log.isDebugEnabled()) log.debug("meta debug operation timeout. Will return empty opbject.");
}
} catch (Exception e) {
log.error("Exception on meta debug operation : Key : " + key, e);
}
if (log.isDebugEnabled()) log.debug("Meta Debug Data : " + rv);
}
return rv;
}
public Map<SocketAddress, String> execCmd(final String cmd, String[] ips) {
final Map<SocketAddress, String> rv = new HashMap<SocketAddress, String>();
Collection<MemcachedNode> nodes = null;
if(ips == null || ips.length == 0) {
nodes = mconn.getLocator().getAll();
} else {
nodes = new ArrayList<MemcachedNode>(ips.length);
for(String ip : ips) {
for(MemcachedNode node : mconn.getLocator().getAll()) {
if(((InetSocketAddress)node.getSocketAddress()).getAddress().getHostAddress().equals(ip)) {
nodes.add(node);
}
}
}
}
if(nodes != null && !nodes.isEmpty()) {
CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
@Override
public Operation newOp(final MemcachedNode n, final CountDownLatch latch) {
final SocketAddress sa = n.getSocketAddress();
return ((EVCacheAsciiOperationFactory)opFact).execCmd(cmd, new ExecCmdOperation.Callback() {
@Override
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("cmd : " + cmd + "; MemcachedNode : " + n + "; Status : " + status);
rv.put(sa, status.getMessage());
}
@Override
public void complete() {
latch.countDown();
}
});
}
}, nodes);
try {
blatch.await(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for stats", e);
}
}
return rv;
}
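/*
 * Meta get: only supported with the ASCII operation factory. Item flags returned by the
 * server (size, cas, client flags, fetched-since-write, last access, TTL) are collected
 * into an EVCacheItem alongside the decoded value.
 */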
public <T> EVCacheOperationFuture<EVCacheItem<T>> asyncMetaGet(final String key, final Transcoder<T> tc, EVCacheGetOperationListener<T> listener) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<EVCacheItem<T>> rv = new EVCacheOperationFuture<EVCacheItem<T>>(key, latch, new AtomicReference<EVCacheItem<T>>(null), readTimeout.get().intValue(), executorService, client);
if(opFact instanceof EVCacheAsciiOperationFactory) {
final Operation op = ((EVCacheAsciiOperationFactory)opFact).metaGet(key, new MetaGetOperation.Callback() {
private EVCacheItem<T> evItem = new EVCacheItem<T>();
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("Getting Key : " + key + "; Status : " + status.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "")
+ "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
try {
if (evItem.getData() != null) {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val : " + evItem);
rv.set(evItem, status);
} else {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val is null");
rv.set(null, status);
}
} catch (Exception e) {
log.error(e.getMessage(), e);
rv.set(null, status);
}
}
@Override
public void gotMetaData(String k, char flag, String fVal) {
if (log.isDebugEnabled()) log.debug("key " + k + "; val : " + fVal + "; flag : " + flag);
if (isWrongKeyReturned(key, k)) return;
switch (flag) {
case 's':
evItem.getItemMetaData().setSizeInBytes(Integer.parseInt(fVal));
break;
case 'c':
evItem.getItemMetaData().setCas(Long.parseLong(fVal));
break;
case 'f':
evItem.setFlag(Integer.parseInt(fVal));
break;
case 'h':
evItem.getItemMetaData().setHasBeenFetchedAfterWrite(fVal.equals("1"));
break;
case 'l':
evItem.getItemMetaData().setSecondsSinceLastAccess(Long.parseLong(fVal));
break;
case 'O':
//opaque = val;
break;
case 't':
final int ttlLeft = Integer.parseInt(fVal);
evItem.getItemMetaData().setSecondsLeftToExpire(ttlLeft);
getDataSizeDistributionSummary(EVCacheMetricsFactory.META_GET_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.INTERNAL_TTL).record(ttlLeft);
break;
default:
break;
}
}
@Override
public void gotData(String k, int flag, byte[] data) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Read data : key " + k + "; flags : " + flag + "; data : " + data);
if (isWrongKeyReturned(key, k)) return;
if (data != null) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + k + "; val size : " + data.length);
getDataSizeDistributionSummary(EVCacheMetricsFactory.META_GET_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
if (tc == null) {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
final Transcoder<T> t = (Transcoder<T>) getTranscoder();
final T item = t.decode(new CachedData(flag, data, t.getMaxSize()));
evItem.setData(item);
}
} else {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
final T item = tc.decode(new CachedData(flag, data, tc.getMaxSize()));
evItem.setData(item);
}
}
} else {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + k + "; val is null" );
}
}
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.META_GET_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (evItem.getData() != null ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), host, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
mconn.enqueueOperation(key, op);
if (log.isDebugEnabled()) log.debug("Meta_Get Data : " + rv);
}
return rv;
}
}
| 3,988 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheMemcachedNodeROImpl.java
|
package net.spy.memcached;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.util.Collection;
import net.spy.memcached.ops.Operation;
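/**
 * Read-only view of a {@link MemcachedNode}: inspection methods delegate to the wrapped
 * node while anything that would mutate its state throws UnsupportedOperationException.
 */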
public class EVCacheMemcachedNodeROImpl implements MemcachedNode {
private final MemcachedNode root;
public EVCacheMemcachedNodeROImpl(MemcachedNode n) {
super();
root = n;
}
public String toString() {
return root.toString();
}
public void addOp(Operation op) {
throw new UnsupportedOperationException();
}
public void insertOp(Operation op) {
throw new UnsupportedOperationException();
}
public void connected() {
throw new UnsupportedOperationException();
}
public void copyInputQueue() {
throw new UnsupportedOperationException();
}
public void fillWriteBuffer(boolean optimizeGets) {
throw new UnsupportedOperationException();
}
public void fixupOps() {
throw new UnsupportedOperationException();
}
public int getBytesRemainingToWrite() {
return root.getBytesRemainingToWrite();
}
public SocketChannel getChannel() {
throw new UnsupportedOperationException();
}
public Operation getCurrentReadOp() {
throw new UnsupportedOperationException();
}
public Operation getCurrentWriteOp() {
throw new UnsupportedOperationException();
}
public ByteBuffer getRbuf() {
throw new UnsupportedOperationException();
}
public int getReconnectCount() {
return root.getReconnectCount();
}
public int getSelectionOps() {
return root.getSelectionOps();
}
public SelectionKey getSk() {
throw new UnsupportedOperationException();
}
public SocketAddress getSocketAddress() {
return root.getSocketAddress();
}
public ByteBuffer getWbuf() {
throw new UnsupportedOperationException();
}
public boolean hasReadOp() {
return root.hasReadOp();
}
public boolean hasWriteOp() {
return root.hasWriteOp();
}
public boolean isActive() {
return root.isActive();
}
public void reconnecting() {
throw new UnsupportedOperationException();
}
public void registerChannel(SocketChannel ch, SelectionKey selectionKey) {
throw new UnsupportedOperationException();
}
public Operation removeCurrentReadOp() {
throw new UnsupportedOperationException();
}
public Operation removeCurrentWriteOp() {
throw new UnsupportedOperationException();
}
public void setChannel(SocketChannel to) {
throw new UnsupportedOperationException();
}
public void setSk(SelectionKey to) {
throw new UnsupportedOperationException();
}
public void setupResend() {
throw new UnsupportedOperationException();
}
public void transitionWriteItem() {
throw new UnsupportedOperationException();
}
public int writeSome() throws IOException {
throw new UnsupportedOperationException();
}
public Collection<Operation> destroyInputQueue() {
throw new UnsupportedOperationException();
}
public void authComplete() {
throw new UnsupportedOperationException();
}
public void setupForAuth() {
throw new UnsupportedOperationException();
}
public int getContinuousTimeout() {
throw new UnsupportedOperationException();
}
public void setContinuousTimeout(boolean isIncrease) {
throw new UnsupportedOperationException();
}
public boolean isAuthenticated() {
throw new UnsupportedOperationException();
}
public long lastReadDelta() {
throw new UnsupportedOperationException();
}
public void completedRead() {
throw new UnsupportedOperationException();
}
public MemcachedConnection getConnection() {
throw new UnsupportedOperationException();
}
public void setConnection(MemcachedConnection connection) {
throw new UnsupportedOperationException();
}
}
| 3,989 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheNode.java
|
package net.spy.memcached;
import java.util.List;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.Tag;
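/**
 * EVCache-specific view of a memcached node, exposing queue sizes, operation counts,
 * reconnect/connect times and monitoring hooks on top of {@link MemcachedNode}.
 */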
public interface EVCacheNode extends MemcachedNode {
void registerMonitors();
boolean isAvailable(EVCache.Call call);
int getWriteQueueSize();
int getReadQueueSize();
int getInputQueueSize();
long incrOps();
long getNumOfOps();
void flushInputQueue();
long getStartTime();
long getTimeoutStartTime();
void removeMonitoring();
void shutdown();
long getCreateTime();
void setConnectTime(long cTime);
String getAppName();
String getHostName();
ServerGroup getServerGroup();
int getId();
List<Tag> getTags();
int getTotalReconnectCount();
String getSocketChannelLocalAddress();
String getSocketChannelRemoteAddress();
String getConnectTime();
int getContinuousTimeout();
int getReconnectCount();
boolean isActive();
EVCacheClient getEVCacheClient();
}
| 3,990 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/ExecCmdOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.nio.ByteBuffer;
import java.util.Arrays;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatusCode;
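/**
 * Sends an arbitrary ASCII command line to a node and completes once the server replies
 * with either "OK" or "ERROR".
 */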
public class ExecCmdOperationImpl extends OperationImpl implements ExecCmdOperation {
private static final OperationStatus OK = new OperationStatus(true, "OK",
StatusCode.SUCCESS);
private static final OperationStatus ERROR = new OperationStatus(true, "ERROR",
StatusCode.ERR_INTERNAL);
private final byte[] cmd;
public ExecCmdOperationImpl(String arg, ExecCmdOperation.Callback c) {
super(c);
this.cmd = (arg + "\r\n").getBytes();
}
@Override
public void initialize() {
setBuffer(ByteBuffer.wrap(cmd));
}
@Override
public void handleLine(String line) {
if (line.equals("OK")) {
callback.receivedStatus(OK);
transitionState(OperationState.COMPLETE);
} else if (line.equals("ERROR")) {
callback.receivedStatus(ERROR);
transitionState(OperationState.COMPLETE);
}
}
@Override
protected void wasCancelled() {
callback.receivedStatus(CANCELLED);
}
@Override
public String toString() {
return "Cmd: " + Arrays.toString(cmd);
}
}
| 3,991 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaArithmeticOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.MutatorOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.StatusCode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
/**
* Operation for Meta Arithmetic commands of memcached.
*/
public class MetaArithmeticOperationImpl extends EVCacheOperationImpl implements
MutatorOperation {
private static final Logger log = LoggerFactory.getLogger(MetaArithmeticOperationImpl.class);
private static final OperationStatus NOT_FOUND = new OperationStatus(false,
"NOT_FOUND", StatusCode.ERR_NOT_FOUND);
// TODO : Move to a Builder as we expand this to support better isolation guarantees
// Request
private static final String META_ARITHMETIC_OP = "ma";
private static final String AUTO_CREATE = "N%d";
private static final String MUTATOR_MODE ="M%c";
private static final char INCR = '+';
private static final char DECR = '-';
private static final String DEFAULT = "J%d";
private static final String DELTA = "D%d";
private static final char FLAG_VALUE = 'v';
// Response
private static final String VALUE_RETURN = "VA";
private final Mutator mutator;
private final String key;
private final long amount;
private final long def;
private final int exp;
private boolean readingValue;
public static final int OVERHEAD = 32;
public MetaArithmeticOperationImpl(Mutator m, String k, long amt, long defaultVal,
int expiry, OperationCallback c) {
super(c);
mutator = m;
key = k;
amount = amt;
def = defaultVal;
exp = expiry;
readingValue = false;
}
@Override
public void handleLine(String line) {
log.debug("Result: %s", line);
OperationStatus found = null;
if (line.startsWith(VALUE_RETURN)) {
// TODO : We may need to tokenize this when more flags are supplied to the request.
this.readingValue = true;
// Ask state machine to read the next line which has the response
this.setReadType(OperationReadType.LINE);
return;
} else if (readingValue) {
// TODO : Tokenize if multiple values are in this line, as of now, it's just the result.
found = new OperationStatus(true, line, StatusCode.SUCCESS);
} else {
// TODO: Other NF/NS/EX and also OK are treated as errors, this will change as we extend the meta API
found = NOT_FOUND;
}
getCallback().receivedStatus(found);
transitionState(OperationState.COMPLETE);
}
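/*
 * Builds a request line roughly of the form (derived from the flags set below):
 *   ma <key> N<exp> M+|M- J<default> D<delta> v
 * followed by CRLF, where 'v' asks the server to return the resulting value.
 */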
@Override
public void initialize() {
int size = KeyUtil.getKeyBytes(key).length + OVERHEAD;
ByteBuffer b = ByteBuffer.allocate(size);
setArguments(b, META_ARITHMETIC_OP, key, String.format(AUTO_CREATE, exp),
String.format(MUTATOR_MODE, (mutator == Mutator.incr ? INCR : DECR)),
String.format(DEFAULT,def), String.format(DELTA,amount), FLAG_VALUE);
b.flip();
setBuffer(b);
}
public Collection<String> getKeys() {
return Collections.singleton(key);
}
public long getBy() {
return amount;
}
public long getDefault() {
return def;
}
public int getExpiration() {
return exp;
}
public Mutator getType() {
return mutator;
}
@Override
public String toString() {
return "Cmd: " + mutator.name() + " Key: " + key + " Amount: " + amount +
" Default: " + def + " Expiry: " + exp;
}
}
| 3,992 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/EVCacheAsciiNodeImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.joda.time.format.ISODateTimeFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.ConnectionFactory;
import net.spy.memcached.EVCacheNode;
import net.spy.memcached.EVCacheNodeMBean;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.protocol.ProxyCallback;
import net.spy.memcached.protocol.TCPMemcachedNodeImpl;
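/**
* ASCII-protocol memcached node used by EVCache. Adds JMX monitoring, a per-node
* operation counter and batching of consecutive get operations on top of
* TCPMemcachedNodeImpl.
*/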
public class EVCacheAsciiNodeImpl extends TCPMemcachedNodeImpl implements EVCacheNodeMBean, EVCacheNode {
private static final Logger log = LoggerFactory.getLogger(EVCacheAsciiNodeImpl.class);
protected long stTime;
protected final String hostName;
protected final BlockingQueue<Operation> readQ;
protected final BlockingQueue<Operation> inputQueue;
protected final EVCacheClient client;
private final AtomicInteger numOps = new AtomicInteger(0);
private long timeoutStartTime;
protected final Counter operationsCounter;
public EVCacheAsciiNodeImpl(SocketAddress sa, SocketChannel c, int bufSize, BlockingQueue<Operation> rq, BlockingQueue<Operation> wq, BlockingQueue<Operation> iq,
long opQueueMaxBlockTimeMillis, boolean waitForAuth, long dt, long at, ConnectionFactory fa, EVCacheClient client, long stTime) {
// ASCII never does auth
super(sa, c, bufSize, rq, wq, iq, opQueueMaxBlockTimeMillis, false, dt, at, fa);
this.client = client;
final String appName = client.getAppName();
this.readQ = rq;
this.inputQueue = iq;
this.hostName = ((InetSocketAddress) getSocketAddress()).getHostName();
this.operationsCounter = client.getOperationCounter();
setConnectTime(stTime);
setupMonitoring(appName);
}
@Override
protected void optimize() {
// make sure there are at least two get operations in a row before
// attempting to optimize them.
if (writeQ.peek() instanceof GetOperation) {
optimizedOp = writeQ.remove();
if (writeQ.peek() instanceof GetOperation) {
OptimizedGetImpl og = new OptimizedGetImpl((GetOperation) optimizedOp);
optimizedOp = og;
while (writeQ.peek() instanceof GetOperation) {
GetOperationImpl o = (GetOperationImpl) writeQ.remove();
if (!o.isCancelled()) {
og.addOperation(o);
}
}
// Initialize the new mega get
optimizedOp.initialize();
assert optimizedOp.getState() == OperationState.WRITE_QUEUED;
ProxyCallback pcb = (ProxyCallback) og.getCallback();
getLogger().debug("Set up %s with %s keys and %s callbacks", this,
pcb.numKeys(), pcb.numCallbacks());
}
}
}
private String getMonitorName(String appName) {
return "com.netflix.evcache:Group=" + appName + ",SubGroup=pool" + ",SubSubGroup=" + client.getServerGroupName()
+ ",SubSubSubGroup=" + client.getId() + ",SubSubSubSubGroup=" + hostName
+ "_" + stTime;
}
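// Registers this node as a JMX MBean, replacing any previously registered
// instance that shares the same ObjectName.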
private void setupMonitoring(String appName) {
try {
final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(appName));
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
mbeanServer.registerMBean(this, mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e);
}
}
public void registerMonitors() {
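// No additional monitors to register; the JMX MBean is set up in the constructor.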
}
public boolean isAvailable(EVCache.Call call) {
return isActive();
}
public int getWriteQueueSize() {
return writeQ.size();
}
public int getReadQueueSize() {
return readQ.size();
}
public int getInputQueueSize() {
return inputQueue.size();
}
public long incrOps() {
operationsCounter.increment();
return numOps.incrementAndGet();
}
public long getNumOfOps() {
return numOps.get();
}
public void flushInputQueue() {
inputQueue.clear();
}
public long getStartTime() {
return stTime;
}
public long getTimeoutStartTime() {
return timeoutStartTime;
}
public void removeMonitoring() {
try {
final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(client.getAppName()));
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e);
}
}
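// Drops the JMX registration and discards any operations still queued on this node.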
public void shutdown() {
removeMonitoring();
writeQ.clear();
readQ.clear();
inputQueue.clear();
}
public long getCreateTime() {
return stTime;
}
public void setConnectTime(long cTime) {
this.stTime = cTime;
}
public String getAppName() {
return client.getAppName();
}
public String getHostName() {
return hostName;
}
public ServerGroup getServerGroup() {
return client.getServerGroup();
}
public int getId() {
return client.getId();
}
public List<Tag> getTags() {
return client.getTagList();
}
public int getTotalReconnectCount() {
return getReconnectCount();
}
@Override
public String getSocketChannelLocalAddress() {
try {
if(getChannel() != null) {
return getChannel().getLocalAddress().toString();
}
} catch (IOException e) {
log.error("Exception", e);
}
return "NULL";
}
@Override
public String getSocketChannelRemoteAddress() {
try {
if(getChannel() != null) {
return getChannel().getRemoteAddress().toString();
}
} catch (IOException e) {
log.error("Exception", e);
}
return "NULL";
}
@Override
public String getConnectTime() {
return ISODateTimeFormat.dateTime().print(stTime);
}
@Override
public EVCacheClient getEVCacheClient() {
return client;
}
}
| 3,993 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaDebugOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.nio.ByteBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatusCode;
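/**
* Implementation of the memcached meta debug ("me") command, which returns
* per-item diagnostic attributes as key=value pairs.
*/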
public class MetaDebugOperationImpl extends EVCacheOperationImpl implements MetaDebugOperation {
private static final Logger log = LoggerFactory.getLogger(MetaDebugOperationImpl.class);
private static final OperationStatus END = new OperationStatus(true, "EN", StatusCode.SUCCESS);
private static final int OVERHEAD = 32;
private final MetaDebugOperation.Callback cb;
private final String key;
public MetaDebugOperationImpl(String k, MetaDebugOperation.Callback cb) {
super(cb);
this.key = k;
this.cb = cb;
}
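// Parses the "me" response: "EN" means the key was not found; otherwise the line is
// "ME <key> <name>=<value> ..." and each attribute is forwarded to the callback.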
@Override
public void handleLine(String line) {
if(log.isDebugEnabled()) log.debug("meta debug of {} returned {}", key, line);
if (line.equals("EN")) {
getCallback().receivedStatus(END);
transitionState(OperationState.COMPLETE);
} else {
String[] parts = line.split(" ", 3);
if(log.isDebugEnabled()) log.debug("Num of parts "+ parts.length);
if(parts.length <= 2) return;
String[] kvPairs = parts[2].split(" ");
for(String kv : kvPairs) {
if(log.isDebugEnabled()) log.debug("kv "+ kv);
String[] tuple = kv.split("=",2);
if(log.isDebugEnabled()) log.debug("{} = {}", tuple[0], tuple[1]);
cb.debugInfo(tuple[0], tuple[1]);
}
}
getCallback().receivedStatus(matchStatus(line, END));
transitionState(OperationState.COMPLETE);
}
@Override
public void initialize() {
ByteBuffer b = ByteBuffer.allocate(KeyUtil.getKeyBytes(key).length + OVERHEAD);
setArguments(b, "me", key);
b.flip();
setBuffer(b);
}
@Override
public String toString() {
return "Cmd: me Key: " + key;
}
}
| 3,994 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/ExecCmdOperation.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
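/**
* Operation for executing an arbitrary ASCII command string against a memcached node.
*/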
public interface ExecCmdOperation extends Operation {
/**
* Callback for cmd operation.
*/
interface Callback extends OperationCallback {
}
}
| 3,995 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/EVCacheOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.OperationCallback;
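/**
* Base class for EVCache-specific ASCII operations; concrete operations override
* handleLine() and initialize().
*/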
public class EVCacheOperationImpl extends OperationImpl {
protected EVCacheOperationImpl(OperationCallback cb) {
super(cb);
}
@Override
public void handleLine(String line) {
// No-op: concrete operations override handleLine to parse their responses.
}
@Override
public void initialize() {
// No-op: concrete operations override initialize to build their request buffers.
}
}
| 3,996 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaGetOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.nio.ByteBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatusCode;
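/**
* Implementation of the memcached meta get ("mg") command: requests the value along
* with item metadata and reports both through MetaGetOperation.Callback.
*/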
public class MetaGetOperationImpl extends EVCacheOperationImpl implements MetaGetOperation {
private static final Logger log = LoggerFactory.getLogger(MetaGetOperationImpl.class);
private static final OperationStatus END = new OperationStatus(true, "EN", StatusCode.SUCCESS);
private static final int OVERHEAD = 32;
private final MetaGetOperation.Callback cb;
private final String key;
private int currentFlag = -1;
private byte[] data = null;
private int readOffset = 0;
private byte lookingFor = '\0';
public MetaGetOperationImpl(String k, MetaGetOperation.Callback cb) {
super(cb);
this.key = k;
this.cb = cb;
}
@Override
public void handleLine(String line) {
if(log.isDebugEnabled()) log.debug("meta get of {} returned {}", key, line);
if (line.length() == 0 || line.equals("EN")) {
getCallback().receivedStatus(END);
transitionState(OperationState.COMPLETE);
} else if (line.startsWith("VA")) {
String[] parts = line.split(" ");
if(log.isDebugEnabled()) log.debug("Num of parts "+ parts.length);
if(parts.length <= 2) return;
int size = Integer.parseInt(parts[1]);
if(log.isDebugEnabled()) log.debug("Size of value in bytes : "+ size);
data = new byte[size];
for(int i = 2; i < parts.length; i++) {
final char flag = parts[i].charAt(0);
final String val = parts[i].substring(1);
if(log.isDebugEnabled()) log.debug("flag="+ flag + "; Val=" + val);
cb.gotMetaData(key, flag, val);
if(flag == 'f') currentFlag = Integer.parseInt(val);
}
setReadType(OperationReadType.DATA);
}
}
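// Streams the value payload that follows the "VA" line: fill the data buffer, hand the
// completed value to the callback, then consume the trailing \r\n and switch back to
// line-oriented reads.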
public void handleRead(ByteBuffer b) {
if(log.isDebugEnabled()) log.debug("readOffset: {}, length: {}", readOffset, data.length);
// If we're not looking for termination, we're still looking for data
if (lookingFor == '\0') {
int toRead = data.length - readOffset;
int available = b.remaining();
toRead = Math.min(toRead, available);
if(log.isDebugEnabled()) log.debug("Reading {} bytes", toRead);
b.get(data, readOffset, toRead);
readOffset += toRead;
}
// Transition us into a ``looking for \r\n'' kind of state if we've
// read enough and are still in a data state.
if (readOffset == data.length && lookingFor == '\0') {
// Hand the completed value, along with its parsed client flags, to the
// meta get callback.
OperationCallback cb = getCallback();
if (cb instanceof MetaGetOperation.Callback) {
MetaGetOperation.Callback mgcb = (MetaGetOperation.Callback) cb;
mgcb.gotData(key, currentFlag, data);
}
lookingFor = '\r';
}
// If we're looking for an ending byte, let's go find it.
if (lookingFor != '\0' && b.hasRemaining()) {
do {
byte tmp = b.get();
assert tmp == lookingFor : "Expecting " + lookingFor + ", got "
+ (char) tmp;
switch (lookingFor) {
case '\r':
lookingFor = '\n';
break;
case '\n':
lookingFor = '\0';
break;
default:
assert false : "Looking for unexpected char: " + (char) lookingFor;
}
} while (lookingFor != '\0' && b.hasRemaining());
// Completed the read, reset stuff.
if (lookingFor == '\0') {
data = null;
readOffset = 0;
currentFlag = -1;
getCallback().receivedStatus(END);
transitionState(OperationState.COMPLETE);
getLogger().debug("Setting read type back to line.");
setReadType(OperationReadType.LINE);
}
}
}
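// Issues "mg <key> s f t h l c v", asking for size, client flags, remaining TTL,
// hit-before, last-access time, CAS and the value itself.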
@Override
public void initialize() {
final String flags = "s f t h l c v";
final ByteBuffer b = ByteBuffer.allocate(KeyUtil.getKeyBytes(key).length + flags.length() + OVERHEAD);
setArguments(b, "mg", key, flags);
b.flip();
setBuffer(b);
}
@Override
public String toString() {
return "Cmd: me Key: " + key;
}
}
| 3,997 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaGetOperation.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
public interface MetaGetOperation extends Operation {
/**
* Operation callback for the meta get (mg) request.
*/
public interface Callback extends OperationCallback {
/**
* Callback invoked for each metadata flag returned for the key.
*
* @param key the key that was retrieved
* @param flag the meta flag character (e.g. 's', 'f', 't')
* @param data the value returned for that flag
*/
void gotMetaData(String key, char flag, String data);
/**
* Callback for result from a get.
*
* @param key the key that was retrieved
* @param flag the flag for this value
* @param data the data stored under this key
*/
void gotData(String key, int flag, byte[] data);
}
}
| 3,998 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaDebugOperation.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
public interface MetaDebugOperation extends Operation {
/**
* Operation callback for the meta debug (me) request.
*/
public interface Callback extends OperationCallback {
/**
* Callback invoked for each key=value attribute returned by the meta debug command.
*
* @param key the name of the debug attribute
* @param val the value of the debug attribute
*/
void debugInfo(String key, String val);
}
}
| 3,999 |