File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/util/Pair.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.util;
import org.apache.commons.lang.Validate;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
public class Pair<L, R> {
private final L left;
private final R right;
/**
 * @param left the left element; must not be null
 * @param right the right element; must not be null
 */
public Pair(L left, R right) {
Validate.notNull(left);
this.left = left;
Validate.notNull(right);
this.right = right;
}
/**
 * @return the left element
 */
public L getLeft() {
    return left;
}
/**
 * @return the right element
 */
public R getRight() {
    return right;
}
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object other) {
if (other == this) return true;
if (!(other instanceof Pair))
return false;
Pair<?,?> that = (Pair<?,?>) other;
return new EqualsBuilder().
        append(this.left, that.left).
        append(this.right, that.right).
        isEquals();
}
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return new HashCodeBuilder().
append(this.left).
append(this.right).
toHashCode();
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("(").
append(this.left).
append(",").
append(this.right).
append(")");
return sb.toString();
}
}
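
A minimal usage sketch (hypothetical, not part of the repo) exercising Pair's equals/hashCode contract, which makes pairs usable as map keys:

import java.util.HashMap;
import java.util.Map;

import com.netflix.staash.rest.util.Pair;

public class PairExample {
    public static void main(String[] args) {
        Pair<String, Integer> p1 = new Pair<String, Integer>("retries", 3);
        Pair<String, Integer> p2 = new Pair<String, Integer>("retries", 3);
        System.out.println(p1.equals(p2)); // true: structural equality via EqualsBuilder
        System.out.println(p1);            // (retries,3)

        // Equal pairs hash alike, so they can index a map.
        Map<Pair<String, Integer>, String> index = new HashMap<Pair<String, Integer>, String>();
        index.put(p1, "config");
        System.out.println(index.get(p2)); // "config"
    }
}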

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/util/PaasUtils.java

/*
* Copyright (C) 2012 DataStax Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.staash.rest.util;
import java.math.*;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.*;
/**
* A number of static fields/methods handy for tests.
*/
public abstract class PaasUtils {
private static final Logger logger = LoggerFactory.getLogger(PaasUtils.class);
public static final String CREATE_KEYSPACE_SIMPLE_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : %d }";
public static final String CREATE_KEYSPACE_GENERIC_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : '%s', %s }";
public static final String SIMPLE_KEYSPACE = "ks";
public static final String SIMPLE_TABLE = "test";
public static final String CREATE_TABLE_SIMPLE_FORMAT = "CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)";
public static final String INSERT_FORMAT = "INSERT INTO %s (key, column1, value) VALUES ('%s', '%s', '%s')";
public static final String SELECT_ALL_FORMAT = "SELECT * FROM %s";
public static BoundStatement setBoundValue(BoundStatement bs, String name, DataType type, Object value) {
switch (type.getName()) {
case ASCII:
bs.setString(name, (String)value);
break;
case BIGINT:
bs.setLong(name, (Long)value);
break;
case BLOB:
bs.setBytes(name, (ByteBuffer)value);
break;
case BOOLEAN:
bs.setBool(name, (Boolean)value);
break;
case COUNTER:
// Just a no-op; counters shouldn't be handled the same way as other types
break;
case DECIMAL:
bs.setDecimal(name, (BigDecimal)value);
break;
case DOUBLE:
bs.setDouble(name, (Double)value);
break;
case FLOAT:
bs.setFloat(name, (Float)value);
break;
case INET:
bs.setInet(name, (InetAddress)value);
break;
case INT:
bs.setInt(name, (Integer)value);
break;
case TEXT:
bs.setString(name, (String)value);
break;
case TIMESTAMP:
bs.setDate(name, (Date)value);
break;
case UUID:
bs.setUUID(name, (UUID)value);
break;
case VARCHAR:
bs.setString(name, (String)value);
break;
case VARINT:
bs.setVarint(name, (BigInteger)value);
break;
case TIMEUUID:
bs.setUUID(name, (UUID)value);
break;
case LIST:
bs.setList(name, (List)value);
break;
case SET:
bs.setSet(name, (Set)value);
break;
case MAP:
bs.setMap(name, (Map)value);
break;
default:
throw new RuntimeException("Missing handling of " + type);
}
return bs;
}
public static Object getValue(Row row, String name, DataType type) {
switch (type.getName()) {
case ASCII:
return row.getString(name);
case BIGINT:
return row.getLong(name);
case BLOB:
return row.getBytes(name);
case BOOLEAN:
return row.getBool(name);
case COUNTER:
return row.getLong(name);
case DECIMAL:
return row.getDecimal(name);
case DOUBLE:
return row.getDouble(name);
case FLOAT:
return row.getFloat(name);
case INET:
return row.getInet(name);
case INT:
return row.getInt(name);
case TEXT:
return row.getString(name);
case TIMESTAMP:
return row.getDate(name);
case UUID:
return row.getUUID(name);
case VARCHAR:
return row.getString(name);
case VARINT:
return row.getVarint(name);
case TIMEUUID:
return row.getUUID(name);
case LIST:
return row.getList(name, classOf(type.getTypeArguments().get(0)));
case SET:
return row.getSet(name, classOf(type.getTypeArguments().get(0)));
case MAP:
return row.getMap(name, classOf(type.getTypeArguments().get(0)), classOf(type.getTypeArguments().get(1)));
}
throw new RuntimeException("Missing handling of " + type);
}
private static Class classOf(DataType type) {
assert !type.isCollection();
switch (type.getName()) {
case ASCII:
case TEXT:
case VARCHAR:
return String.class;
case BIGINT:
case COUNTER:
return Long.class;
case BLOB:
return ByteBuffer.class;
case BOOLEAN:
return Boolean.class;
case DECIMAL:
return BigDecimal.class;
case DOUBLE:
return Double.class;
case FLOAT:
return Float.class;
case INET:
return InetAddress.class;
case INT:
return Integer.class;
case TIMESTAMP:
return Date.class;
case UUID:
case TIMEUUID:
return UUID.class;
case VARINT:
return BigInteger.class;
}
throw new RuntimeException("Missing handling of " + type);
}
// Always return the "same" value for each type
public static Object getFixedValue(final DataType type) {
try {
switch (type.getName()) {
case ASCII:
return "An ascii string";
case BIGINT:
return 42L;
case BLOB:
return ByteBuffer.wrap(new byte[]{ (byte)4, (byte)12, (byte)1 });
case BOOLEAN:
return true;
case COUNTER:
throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters");
case DECIMAL:
return new BigDecimal("3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679");
case DOUBLE:
return 3.142519;
case FLOAT:
return 3.142519f;
case INET:
return InetAddress.getByAddress(new byte[]{(byte)127, (byte)0, (byte)0, (byte)1});
case INT:
return 24;
case TEXT:
return "A text string";
case TIMESTAMP:
return new Date(1352288289L);
case UUID:
return UUID.fromString("087E9967-CCDC-4A9B-9036-05930140A41B");
case VARCHAR:
return "A varchar string";
case VARINT:
return new BigInteger("123456789012345678901234567890");
case TIMEUUID:
return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66");
case LIST:
return new ArrayList(){{ add(getFixedValue(type.getTypeArguments().get(0))); }};
case SET:
return new HashSet(){{ add(getFixedValue(type.getTypeArguments().get(0))); }};
case MAP:
return new HashMap(){{ put(getFixedValue(type.getTypeArguments().get(0)), getFixedValue(type.getTypeArguments().get(1))); }};
}
} catch (Exception e) {
throw new RuntimeException(e);
}
throw new RuntimeException("Missing handling of " + type);
}
// Always return the "same" value for each type
public static Object getFixedValue2(final DataType type) {
try {
switch (type.getName()) {
case ASCII:
return "A different ascii string";
case BIGINT:
return Long.MAX_VALUE;
case BLOB:
ByteBuffer bb = ByteBuffer.allocate(64);
bb.putInt(0xCAFE);
bb.putShort((short) 3);
bb.putShort((short) 45);
return bb;
case BOOLEAN:
return false;
case COUNTER:
throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters");
case DECIMAL:
return new BigDecimal("12.3E+7");
case DOUBLE:
return Double.POSITIVE_INFINITY;
case FLOAT:
return Float.POSITIVE_INFINITY;
case INET:
return InetAddress.getByName("123.123.123.123");
case INT:
return Integer.MAX_VALUE;
case TEXT:
return "r??sum??";
case TIMESTAMP:
return new Date(872835240000L);
case UUID:
return UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00");
case VARCHAR:
return "A different varchar r??sum??";
case VARINT:
return new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000");
case TIMEUUID:
return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66");
case LIST:
return new ArrayList(){{ add(getFixedValue2(type.getTypeArguments().get(0))); }};
case SET:
return new HashSet(){{ add(getFixedValue2(type.getTypeArguments().get(0))); }};
case MAP:
return new HashMap(){{ put(getFixedValue2(type.getTypeArguments().get(0)), getFixedValue2(type.getTypeArguments().get(1))); }};
}
} catch (Exception e) {
throw new RuntimeException(e);
}
throw new RuntimeException("Missing handling of " + type);
}
// Wait for a node to be up and running
// This is used because there is some delay between when a node has been
// added through ccm and when it's actually available for querying
public static void waitFor(String node, Cluster cluster) {
waitFor(node, cluster, 20, false, false);
}
public static void waitFor(String node, Cluster cluster, int maxTry) {
waitFor(node, cluster, maxTry, false, false);
}
public static void waitForDown(String node, Cluster cluster) {
waitFor(node, cluster, 20, true, false);
}
public static void waitForDownWithWait(String node, Cluster cluster, int waitTime) {
waitFor(node, cluster, 20, true, false);
// FIXME: Once stop() works, remove this line
try {
Thread.sleep(waitTime * 1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public static void waitForDown(String node, Cluster cluster, int maxTry) {
waitFor(node, cluster, maxTry, true, false);
}
public static void waitForDecommission(String node, Cluster cluster) {
waitFor(node, cluster, 20, true, true);
}
public static void waitForDecommission(String node, Cluster cluster, int maxTry) {
waitFor(node, cluster, maxTry, true, true);
}
private static void waitFor(String node, Cluster cluster, int maxTry, boolean waitForDead, boolean waitForOut) {
    if (waitForDead)
        logger.info("Waiting for stopped node: " + node);
    else if (waitForOut)
        logger.info("Waiting for decommissioned node: " + node);
    else
        logger.info("Waiting for upcoming node: " + node);
    // In the case where we've killed the last node in the cluster, the driver won't
    // realize that node is dead until keep-alive kicks in, which takes a fairly long
    // time unless an actual query is attempted. So we cheat and force the detection
    // by submitting a request.
    // if (waitForDead || waitForOut)
    //     cluster.manager.submitSchemaRefresh(null, null);
InetAddress address;
try {
address = InetAddress.getByName(node);
} catch (Exception e) {
// That's a problem but that's not *our* problem
return;
}
Metadata metadata = cluster.getMetadata();
for (int i = 0; i < maxTry; ++i) {
for (Host host : metadata.getAllHosts()) {
if (host.getAddress().equals(address) && testHost(host, waitForDead))
return;
}
try { Thread.sleep(1000); } catch (Exception e) {}
}
for (Host host : metadata.getAllHosts()) {
if (host.getAddress().equals(address)) {
if (testHost(host, waitForDead)) {
return;
} else {
// logging this because it gives us the timestamp of when this happened
logger.info(node + " is not " + (waitForDead ? "DOWN" : "UP") + " after " + maxTry + "s");
throw new IllegalStateException(node + " is not " + (waitForDead ? "DOWN" : "UP") + " after " + maxTry + "s");
}
}
}
if (waitForOut){
return;
} else {
logger.info(node + " is not part of the cluster after " + maxTry + "s");
throw new IllegalStateException(node + " is not part of the cluster after " + maxTry + "s");
}
}
private static boolean testHost(Host host, boolean testForDown) {
return testForDown ? !host.getMonitor().isUp() : host.getMonitor().isUp();
}
}
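
A minimal sketch (hypothetical) of the fixed-value helpers; DataType.text() and DataType.cint() are the static factories of the DataStax driver line this class is written against:

import com.datastax.driver.core.DataType;

public class PaasUtilsExample {
    public static void main(String[] args) {
        // getFixedValue always returns the same representative value per CQL type,
        // which is handy for write/read round-trip tests.
        Object text = PaasUtils.getFixedValue(DataType.text()); // "A text string"
        Object num  = PaasUtils.getFixedValue(DataType.cint()); // 24
        System.out.println(text + " / " + num);
    }
}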

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/util/HostSupplier.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.util;
import java.util.ArrayList;
import java.util.List;
import com.google.common.base.Supplier;
import com.netflix.astyanax.connectionpool.Host;
public class HostSupplier implements Supplier<List<Host>> {
public List<Host> get() {
    List<Host> list = new ArrayList<Host>();
// Host h1 = new Host("ec2-54-235-224-8.compute-1.amazonaws.com",7102);
Host h1 = new Host("54.235.224.8",7102);
// Host h2 = new Host("ec2-54-224-106-243.compute-1.amazonaws.com",7102);
Host h2 = new Host("54.224.106.243",7102);
// Host h3 = new Host("ec2-54-242-127-138.compute-1.amazonaws.com",7102);
Host h3 = new Host("54.242.127.138",7102);
list.add(h1);
list.add(h2);
list.add(h3);
return list;
}
}
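
A tiny hedged sketch of consuming the supplier; getHostName()/getPort() are assumed accessors on the Astyanax Host type imported above:

import java.util.List;

import com.netflix.astyanax.connectionpool.Host;

public class HostSupplierExample {
    public static void main(String[] args) {
        // HostSupplier is just a Guava Supplier<List<Host>>; consumers call get().
        List<Host> hosts = new HostSupplier().get();
        for (Host h : hosts) {
            System.out.println(h.getHostName() + ":" + h.getPort());
        }
    }
}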

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/util/StaashRequestContext.java

package com.netflix.staash.rest.util;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Simple class that encapsulates a ThreadLocal map of per-request context.
 * Classes that service a request can use it to record information that is
 * useful for debugging.
 *
 * @author poberai
 */
public class StaashRequestContext {
private static final Logger logger = LoggerFactory.getLogger(StaashRequestContext.class);
private static final DateTimeFormatter dateFormat = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSS z");
private static final ThreadLocal<StaashRequestContext> requestContext = new ThreadLocal<StaashRequestContext>() {
@Override
protected StaashRequestContext initialValue() {
return new StaashRequestContext();
}
};
public static void resetRequestContext() {
requestContext.set(new StaashRequestContext());
}
public static void flushRequestContext() {
    logger.info(requestContext.get().getMapContents());
}
public static void addContext(String key, String value) {
requestContext.get().addContextToMap(key, value);
}
public static void logDate() {
requestContext.get().addDateToMap();
}
public static void recordRequestStart() {
requestContext.get().startTime.set(System.currentTimeMillis());
}
public static void recordRequestEnd() {
Long begin = requestContext.get().startTime.get();
Long now = System.currentTimeMillis();
requestContext.get().addContextToMap("Duration", String.valueOf(now - begin));
}
public static String getRequestId() {
return requestContext.get().requestId;
}
private final ConcurrentHashMap<String, String> map = new ConcurrentHashMap<String, String>();
private final AtomicLong startTime = new AtomicLong(0L);
private final String requestId = UUID.randomUUID().toString();
private StaashRequestContext() {
map.put("request-id", requestId);
}
private void addContextToMap(String key, String value) {
map.put(key, value);
}
private void addDateToMap() {
DateTime dt = new DateTime();
map.put("Date", dateFormat.print(dt));
}
private String getMapContents() {
StringBuilder sb = new StringBuilder("\n========================STAASH REQUEST CONTEXT===========================================");
for (String key : map.keySet()) {
sb.append("\n").append(key).append(":").append(map.get(key));
}
sb.append("\n======================================================================================");
return sb.toString();
}
}
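
The intended per-request call sequence, reconstructed from the static API above (the path value is made up); StaashAuditFilter below follows the same pattern:

public class RequestContextExample {
    public static void main(String[] args) {
        StaashRequestContext.resetRequestContext(); // fresh context for this thread
        StaashRequestContext.recordRequestStart();
        StaashRequestContext.logDate();
        StaashRequestContext.addContext("PATH", "/staash/v1/data/mydb/mytable");
        // ... service the request ...
        StaashRequestContext.recordRequestEnd();    // records "Duration"
        StaashRequestContext.flushRequestContext(); // logs the whole context map
    }
}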

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/util/StaashConstants.java

package com.netflix.staash.rest.util;
public interface StaashConstants {
public static final int MAX_FILE_UPLOAD_SIZE_IN_KB = 5000;
public static final int CHUNK_SIZE_IN_KB = 256;
}

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/util/MetaConstants.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.util;
public interface MetaConstants {
public static final String CASSANDRA_KEYSPACE_ENTITY_TYPE = "com.netflix.entity.type.cassandra.keyspace";
public static final String STAASH_TABLE_ENTITY_TYPE = "com.netflix.entity.type.staash.table";
public static final String STAASH_STORAGE_TYPE_ENTITY = "com.netflix.entity.type.staash.storage";
public static final String STAASH_DB_ENTITY_TYPE = "com.netflix.entity.type.staash.db";
public static final String STAASH_TS_ENTITY_TYPE = "com.netflix.entity.type.staash.timeseries";
public static final String STAASH_KV_ENTITY_TYPE = "com.netflix.entity.type.staash.keyvaluestore";
public static final String CASSANDRA_CF_TYPE = "com.netflix.entity.type.cassandra.columnfamily";
public static final String CASSANDRA_TIMESERIES_TYPE = "com.netflix.entity.type.cassandra.timeseries";
public static final String PAAS_CLUSTER_ENTITY_TYPE = "com.netflix.entity.type.staash.table";
public static final String STORAGE_TYPE = "com.netflix.trait.type.storagetype";
public static final String RESOLUTION_TYPE = "com.netflix.trait.type.resolutionstring";
public static final String NAME_TYPE = "com.netflix.trait.type.name";
public static final String RF_TYPE = "com.netflix.trait.type.replicationfactor";
public static final String STRATEGY_TYPE = "com.netflix.trait.type.strategy";
public static final String COMPARATOR_TYPE = "com.netflix.trait.type.comparator";
public static final String KEY_VALIDATION_CLASS_TYPE = "com.netflix.trait.type.key_validation_class";
public static final String COLUMN_VALIDATION_CLASS_TYPE = "com.netflix.trait.type.validation_class";
public static final String DEFAULT_VALIDATION_CLASS_TYPE = "com.netflix.trait.type.default_validation_class";
public static final String COLUMN_NAME_TYPE = "com.netflix.trait.type.colum_name";
public static final String CONTAINS_TYPE = "com.netflix.relation.type.contains";
public static final String PERIOD_TIME_SERIES = "period";
public static final String PREFIX_TIME_SERIES = "prefix";
public static final String META_KEY_SPACE = "staashmetaks_cde";
public static final String META_COLUMN_FAMILY = "staashmetacf_cde";
}

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/resources/StaashDataResourceImpl.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.resources;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.UUID;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import com.google.common.io.Files;
import com.google.inject.Inject;
import com.netflix.staash.json.JsonArray;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.util.StaashConstants;
import com.netflix.staash.rest.util.StaashRequestContext;
import com.netflix.staash.service.DataService;
import com.sun.jersey.core.header.FormDataContentDisposition;
import com.sun.jersey.multipart.FormDataParam;
import com.sun.jersey.spi.container.ResourceFilters;
@Path("/staash/v1/data")
public class StaashDataResourceImpl {
private DataService datasvc;
@Inject
public StaashDataResourceImpl(DataService data) {
this.datasvc = data;
}
@GET
@Path("{db}/{table}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String listAllRow(@PathParam("db") String db,
@PathParam("table") String table) {
return datasvc.listRow(db, table, "", "");
}
@GET
@Path("{db}/{table}/{keycol}/{key}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String listRow(@PathParam("db") String db,
@PathParam("table") String table,
@PathParam("keycol") String keycol, @PathParam("key") String key) {
return datasvc.listRow(db, table, keycol, key);
}
@GET
@Path("/join/{db}/{table1}/{table2}/{joincol}/{value}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String doJoin(@PathParam("db") String db,
@PathParam("table1") String table1,
@PathParam("table2") String table2,
@PathParam("joincol") String joincol,
@PathParam("value") String value) {
return datasvc.doJoin(db, table1, table2, joincol, value);
}
@GET
@Path("/timeseries/{db}/{table}/{eventtime}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String readEvent(@PathParam("db") String db,
@PathParam("table") String table,
@PathParam("eventtime") String time) {
String out;
try {
out = datasvc.readEvent(db, table, time);
} catch (RuntimeException e) {
out = "{\"msg\":\"" + e.getMessage() + "\"}";
}
return out;
}
@GET
@Path("/timeseries/{db}/{table}/{prefix}/{eventtime}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String readEvent(@PathParam("db") String db,
@PathParam("table") String table,
@PathParam("prefix") String prefix,
@PathParam("eventtime") String time) {
String out;
try {
out = datasvc.readEvent(db, table, prefix, time);
} catch (RuntimeException e) {
out = "{\"msg\":\"" + e.getMessage() + "\"}";
}
return out;
}
@GET
@Path("/timeseries/{db}/{table}/{prefix}/{starttime}/{endtime}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String readEvent(@PathParam("db") String db,
@PathParam("table") String table,
@PathParam("prefix") String prefix,
@PathParam("starttime") String starttime,
@PathParam("endtime") String endtime) {
String out;
try {
out = datasvc.readEvent(db, table, prefix, starttime, endtime);
} catch (RuntimeException e) {
out = "{\"msg\":\"" + e.getMessage() + "\"}";
}
return out;
}
@POST
@Path("{db}/{table}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String updateRow(@PathParam("db") String db,
@PathParam("table") String table, String rowObject) {
return datasvc.writeRow(db, table, new JsonObject(rowObject));
}
@POST
@Path("/timeseries/{db}/{table}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String insertEvent(@PathParam("db") String db,
@PathParam("table") String table, String rowStr) {
JsonArray eventsArr = new JsonArray(rowStr);
return datasvc.writeEvents(db, table, eventsArr);
}
@GET
@Path("/kvstore/{key}")
@Produces(MediaType.APPLICATION_OCTET_STREAM)
@ResourceFilters(StaashAuditFilter.class)
public byte[] getObject(@PathParam("key") String key) {
byte[] value = datasvc.readChunked("kvstore", "kvmap", key);
StaashRequestContext.addContext("N-BYTES", String.valueOf(value.length));
return value;
}
@POST
@Path("/kvstore")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String storeFile(
@FormDataParam("value") InputStream uploadedInputStream,
@FormDataParam("value") FormDataContentDisposition fileDetail) {
try {
writeToChunkedKVStore(uploadedInputStream, fileDetail.getFileName());
} catch (IOException e) {
e.printStackTrace();
return "{\"msg\":\"file could not be uploaded\"}";
}
return "{\"msg\":\"file successfully uploaded\"}";
}
@POST
@Path("/kvstore/name/{name}")
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String storeNamedFile(
@FormDataParam("value") InputStream uploadedInputStream,
@PathParam("name") String name) {
try {
writeToChunkedKVStore(uploadedInputStream, name);
} catch (IOException e) {
e.printStackTrace();
return "{\"msg\":\"file could not be uploaded\"}";
}
return "{\"msg\":\"file successfully uploaded\"}";
}
private void writeToChunkedKVStore(InputStream is, String objectName) throws IOException {
    InputStream input = null;
    File tmpFile = null;
    try {
        // Spool the upload to a unique temp file so its size can be checked
        // before it is handed to the chunked KV store.
        tmpFile = new File("/tmp/staashFile-" + UUID.randomUUID());
        OutputStream out = new FileOutputStream(tmpFile);
        int read;
        byte[] bytes = new byte[1024];
        while ((read = is.read(bytes)) != -1) {
            out.write(bytes, 0, read);
        }
        out.flush();
        out.close();
        byte[] fbytes = Files.toByteArray(tmpFile);
        StaashRequestContext.addContext("N-BYTES", String.valueOf(fbytes.length));
        if (fbytes.length > StaashConstants.MAX_FILE_UPLOAD_SIZE_IN_KB * 1000) {
            throw new RuntimeException("File is too large to upload, max size supported is "
                    + StaashConstants.MAX_FILE_UPLOAD_SIZE_IN_KB + " KB");
        }
        input = new FileInputStream(tmpFile);
        datasvc.writeChunked("kvstore", "kvmap", objectName, input);
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage());
    } finally {
        if (input != null) input.close();
        if (tmpFile != null) {
            tmpFile.delete();
        }
    }
}
//
// private void writeToKVStore(InputStream uploadedInputStream,
// String uploadedFileName) {
//
// try {
// String uploadedFileLocation = "/tmp/" + uploadedFileName;
// OutputStream out = new FileOutputStream(new File(
// uploadedFileLocation));
// int read = 0;
// byte[] bytes = new byte[1024];
//
// out = new FileOutputStream(new File(uploadedFileLocation));
// while ((read = uploadedInputStream.read(bytes)) != -1) {
// out.write(bytes, 0, read);
// }
// out.flush();
// out.close();
// byte[] fbytes = Files.toByteArray(new File(uploadedFileLocation));
// if (fbytes!=null && fbytes.length>StaashConstants.MAX_FILE_UPLOAD_SIZE_IN_KB*1000) {
// throw new RuntimeException("File is too large to upload, max size supported is 2MB");
// }
// JsonObject obj = new JsonObject();
// obj.putString("key", uploadedFileName);
// obj.putBinary("value", fbytes);
// datasvc.writeToKVStore("kvstore", "kvmapnochunks", obj);
//
// } catch (IOException e) {
// throw new RuntimeException(e.getMessage());
// }
// }
}
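
For the chunked KV store endpoints above, a hedged client-side sketch using the Jersey 1.x client with multipart support (the host, port, and file path are made up; assumes jersey-client and jersey-multipart on the classpath):

import java.io.File;

import javax.ws.rs.core.MediaType;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.multipart.FormDataMultiPart;
import com.sun.jersey.multipart.file.FileDataBodyPart;

public class KvUploadExample {
    public static void main(String[] args) {
        Client client = Client.create();
        WebResource resource = client.resource("http://localhost:8080/staash/v1/data/kvstore");
        // The resource reads the multipart field named "value", per @FormDataParam("value") above.
        FormDataMultiPart form = new FormDataMultiPart();
        form.bodyPart(new FileDataBodyPart("value", new File("/tmp/example.bin")));
        String reply = resource.type(MediaType.MULTIPART_FORM_DATA_TYPE).post(String.class, form);
        System.out.println(reply); // {"msg":"file successfully uploaded"} on success
    }
}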

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/resources/StaashAuditFilter.java

package com.netflix.staash.rest.resources;
import java.util.List;
import java.util.Map.Entry;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.util.StaashRequestContext;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
/**
 * Class that encapsulates the pre- and post-filters to be executed when
 * serving requests via Jersey resources.
 *
 * @author poberai
 */
public class StaashAuditFilter implements ResourceFilter, ContainerRequestFilter, ContainerResponseFilter {
private static final Logger logger = LoggerFactory.getLogger(StaashAuditFilter.class);
@Context HttpServletRequest request;
@Context HttpServletResponse response;
public ContainerRequestFilter getRequestFilter() {
return this;
}
public ContainerResponseFilter getResponseFilter() {
return this;
}
public ContainerRequest filter(ContainerRequest cReq) {
    logger.info("StaashAuditFilter PRE");
    StaashRequestContext.resetRequestContext();
    StaashRequestContext.recordRequestStart();
    StaashRequestContext.logDate();
    StaashRequestContext.addContext("PATH", cReq.getPath(true));
    StaashRequestContext.addContext("METHOD", cReq.getMethod());
    logger.info("Adding headers to request context");
    addRequestHeaders(cReq);
    logger.info("Adding query params to request context");
    addQueryParameters(cReq);
return cReq;
}
public ContainerResponse filter(ContainerRequest request, ContainerResponse response) {
    logger.info("StaashAuditFilter POST");
StaashRequestContext.addContext("STATUS", String.valueOf(response.getStatus()));
StaashRequestContext.recordRequestEnd();
StaashRequestContext.flushRequestContext();
// Add RequestId to response
addRequestIdToResponse(response);
return response;
}
/**
* Private helper that adds the request-id to the response payload.
* @param response
*/
private void addRequestIdToResponse(ContainerResponse response) {
// The request-id to be injected in the response
String requestId = StaashRequestContext.getRequestId();
// The key response attributes
int status = response.getStatus();
MediaType mediaType = response.getMediaType();
// Constant-first comparison avoids an NPE when the response carries no media type.
if (MediaType.APPLICATION_JSON_TYPE.equals(mediaType)) {
String message = (String)response.getEntity();
JsonObject json = new JsonObject(message);
json.putString("request-id", requestId);
Response newJerseyResponse = Response.status(status).type(mediaType).entity(json.toString()).build();
response.setResponse(newJerseyResponse);
}
// Add the request id to the response regardless of the media type,
// this allows non json responses to have a request id in the response
response.getHttpHeaders().add("x-nflx-staash-request-id", requestId);
}
private void addRequestHeaders(ContainerRequest cReq) {
MultivaluedMap<String, String> headers = cReq.getRequestHeaders();
for (Entry<String, List<String>> e : headers.entrySet()) {
StaashRequestContext.addContext("H__" + e.getKey(), e.getValue().toString());
}
}
private void addQueryParameters(ContainerRequest cReq) {
MultivaluedMap<String, String> params = cReq.getQueryParameters();
for (Entry<String, List<String>> e : params.entrySet()) {
StaashRequestContext.addContext("Q__" + e.getKey(), e.getValue().toString());
}
}
}
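
For reference, a resource opts into this filter declaratively; a minimal sketch (ExampleResource is hypothetical, mirroring the resource classes in this section):

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

import com.sun.jersey.spi.container.ResourceFilters;

@Path("/staash/v1/example")
public class ExampleResource {
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    @ResourceFilters(StaashAuditFilter.class) // runs the PRE/POST audit around each call
    public String ping() {
        return "{\"msg\":\"pong\"}";
    }
}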

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/resources/StaashAdminResourceImpl.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.resources;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import com.google.inject.Inject;
import com.netflix.staash.exception.StorageDoesNotExistException;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.meta.entity.EntityType;
import com.netflix.staash.service.MetaService;
import com.sun.jersey.spi.container.ResourceFilters;
@Path("/staash/v1/admin")
public class StaashAdminResourceImpl {
private final MetaService metasvc;
@Inject
public StaashAdminResourceImpl(MetaService meta) {
this.metasvc = meta;
}
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("/db")
@ResourceFilters(StaashAuditFilter.class)
public String listSchemas() {
String schemas = metasvc.listSchemas();
return schemas;
}
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("/storage")
@ResourceFilters(StaashAuditFilter.class)
public String listStorage() {
String storages = metasvc.listStorage();
return storages;
}
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("/db/{schema}")
@ResourceFilters(StaashAuditFilter.class)
public String listTables(@PathParam("schema") String schema) {
String schemas = metasvc.listTablesInSchema(schema);
return schemas;
}
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("/timeseries/{schema}")
@ResourceFilters(StaashAuditFilter.class)
public String listTimeseries(@PathParam("schema") String schema) {
String schemas = metasvc.listTimeseriesInSchema(schema);
return schemas;
}
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("/db")
@ResourceFilters(StaashAuditFilter.class)
public String createSchema(String payLoad) {
if (payLoad!=null) {
try {
return metasvc.writeMetaEntity(EntityType.DB, payLoad);
} catch (StorageDoesNotExistException e) {
e.printStackTrace();
}
}
JsonObject obj = new JsonObject("{\"message\":\"payload cannot be null and must conform to: {name:<name>,cluster:<cluster>}\"}");
return obj.toString();
}
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("/storage")
@ResourceFilters(StaashAuditFilter.class)
public String createStorage(String payload) {
if (payload!=null) {
try {
return metasvc.writeMetaEntity(EntityType.STORAGE, payload);
} catch (StorageDoesNotExistException e) {
e.printStackTrace();
}
}
JsonObject obj = new JsonObject("{\"message\":\"payload cannot be null and must conform to: {name:<name>,cluster:<cluster>}\"}");
return obj.toString();
}
@POST
@Path("{schema}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String createTable(@PathParam("schema") String schemaName, String payload) {
JsonObject msg;
try {
if (payload!=null) {
JsonObject obj;
obj = new JsonObject(payload).putString("db", schemaName);
return metasvc.writeMetaEntity(EntityType.TABLE, obj.toString());
}
msg = new JsonObject("{\"message\":\"payload cannot be null and must conform to: {name:<name>,cluster:<cluster>}\"}");
} catch (StorageDoesNotExistException e) {
    msg = new JsonObject("{\"message\":\"Storage Does Not Exist\"}");
}
return msg.toString();
}
@POST
@Path("/timeseries/{schema}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StaashAuditFilter.class)
public String createTimeseries(@PathParam("schema") String schemaName, String payload) {
JsonObject msg;
try {
if (payload!=null) {
JsonObject obj = new JsonObject(payload).putString("db", schemaName);
return metasvc.writeMetaEntity(EntityType.SERIES, obj.toString());
}
msg = new JsonObject("{\"message\":\"payload cannot be null and must conform to: {name:<name>,cluster:<cluster>}\"}");
} catch (StorageDoesNotExistException e) {
    msg = new JsonObject("{\"message\":\"Storage Does Not Exist\"}");
}
return msg.toString();
}
}
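
A hedged sketch of building the createSchema payload with the project's own JsonObject (the names are made up); the error messages above describe the expected {name:<name>,cluster:<cluster>} shape:

import com.netflix.staash.json.JsonObject;

public class AdminPayloadExample {
    public static void main(String[] args) {
        // Body for POST /staash/v1/admin/db
        JsonObject db = new JsonObject()
                .putString("name", "mydb")
                .putString("cluster", "localhost");
        System.out.println(db); // {"name":"mydb","cluster":"localhost"}
    }
}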

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/CqlDataDaoImpl.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.dao;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.select;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.List;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.meta.entity.PaasTableEntity;
import com.netflix.staash.rest.util.Pair;
import com.netflix.staash.service.CacheService;
import com.netflix.staash.storage.service.MySqlService;
public class CqlDataDaoImpl implements DataDao {
private MetaDao meta;
private Cluster cluster;
private Session session;
@Inject
public CqlDataDaoImpl(@Named("datacluster") Cluster cluster, MetaDao meta) {
this.cluster = cluster;
this.meta = meta;
this.session = this.cluster.connect();
//from the meta get the name of the cluster for this db
}
public String writeRow(String db, String table, JsonObject rowObj) {
String query = BuildRowInsertQuery(db, table, rowObj);
Print(query);
//String storage = rowObj.getField("storage");
String storage = meta.runQuery("com.netflix.test.storage",db+"."+table).get(db+"."+table).getString("storage");
if (storage!=null && storage.equals("mysql")) {
MySqlService.insertRowIntoTable(db, table, query);
} else {
session.execute(query);
}
JsonObject obj = new JsonObject("{\"status\":\"ok\"}");
return obj.toString();
}
private String BuildRowInsertQuery(String db, String table,
        JsonObject rowObj) {
    String columns = rowObj.getString("columns");
    String values = rowObj.getString("values");
    String storage = meta.runQuery("com.netflix.test.storage", db + "." + table).get(db + "." + table).getString("storage");
    // MySQL queries run inside the db's own schema; only the Cassandra path
    // qualifies the table name with the db.
    if (storage != null && storage.contains("mysql"))
        return "INSERT INTO " + table + "(" + columns + ") VALUES(" + values + ");";
    else
        return "INSERT INTO " + db + "." + table + "(" + columns + ") VALUES(" + values + ");";
}
private void Print(String str) {
    System.out.println(str);
}
private String BuildQuery(PaasTableEntity tableEnt) {
String schema = tableEnt.getSchemaName();
String tableName = tableEnt.getName();
List<Pair<String, String>> columns = tableEnt.getColumns();
String colStrs = "";
for (Pair<String, String> colPair : columns) {
colStrs = colStrs + colPair.getRight() + " " + colPair.getLeft()
+ ", ";
}
String primarykeys = tableEnt.getPrimarykey();
String PRIMARYSTR = "PRIMARY KEY(" + primarykeys + ")";
return "CREATE TABLE " + schema + "." + tableName + " (" + colStrs
+ " " + PRIMARYSTR + ");";
}
public String listRow(String db, String table, String keycol, String key) {
String storage = meta.runQuery("com.netflix.test.storage",db+"."+table).get(db+"."+table).getString("storage");
if (storage!=null && storage.contains("mysql")) {
String query = "select * from "+table+" where "+keycol+"=\'"+key+"\'";
Print(query);
java.sql.ResultSet rs = MySqlService.executeRead(db, query);
try {
while (rs.next()) {
ResultSetMetaData rsmd = rs.getMetaData();
String columns ="";
String values = "";
int count = rsmd.getColumnCount();
for (int i=1;i<=count;i++) {
String colName = rsmd.getColumnName(i);
columns = columns + colName + ",";
String value = rs.getString(i);
values = values + value +",";
}
JsonObject response = new JsonObject();
response.putString("columns", columns.substring(0, columns.length()-1));
response.putString("values", values.substring(0, values.length()-1));
return response.toString();
}
} catch (SQLException e) {
    e.printStackTrace();
}
}
String query = select().all().from(db, table).where(eq(keycol, key))
        .getQueryString();
// The query fully qualifies keyspace and table, so the injected session can run
// it directly; building a new Cluster per request would leak connections.
ResultSet rs = session.execute(query);
return convertResultSet(rs);
}
private String convertResultSet(ResultSet rs) {
String colStr = "";
String rowStr = "";
JsonObject response = new JsonObject();
List<Row> rows = rs.all();
if (!rows.isEmpty() && rows.size() == 1) {
rowStr = rows.get(0).toString();
}
ColumnDefinitions colDefs = rs.getColumnDefinitions();
colStr = colDefs.toString();
response.putString("columns", colStr.substring(8, colStr.length() - 1));
response.putString("values", rowStr.substring(4, rowStr.length() - 1));
return response.toString();
}
public String writeEvent(String db, String table, JsonObject rowObj) {
Long evTime = rowObj.getLong("time");
String value = rowObj.getString("event");
Long periodicity = 100L;
Long rowKey = (evTime/periodicity)*periodicity;
String INSERTSTR = "insert into "+db+"."+table+"(key,column1,value) values('"+rowKey.toString()+"',"+evTime+",'"+
value+"');";
Print(INSERTSTR);
session.execute(INSERTSTR);
JsonObject obj = new JsonObject("{\"status\":\"ok\"}");
return obj.toString();
}
public String readEvent(String db, String table, String evTime) {
    Long periodicity = 100L;
    Long rowKey = (Long.valueOf(evTime)/periodicity)*periodicity;
    String query = select().all().from(db, table).where(eq("key", String.valueOf(rowKey))).and(eq("column1", Long.valueOf(evTime)))
            .getQueryString();
    // As in listRow, reuse the injected session rather than building a new Cluster.
    ResultSet rs = session.execute(query);
    return convertResultSet(rs);
}
public String doJoin(String db, String table1, String table2,
String joincol, String value) {
String res1 = listRow(db,table1,joincol,value);
String res2 = listRow(db,table2,joincol,value);
return "{\""+table1+"\":"+res1+",\""+table2+"\":"+res2+"}";
}
}
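
A hedged sketch of the row shape writeRow expects: parallel "columns" and "values" strings that the DAO splices verbatim into the INSERT statement (the db, table, and column names are made up):

import com.netflix.staash.json.JsonObject;

public class DataDaoExample {
    static JsonObject buildRow() {
        // writeRow("mydb", "users", row) would then issue:
        //   INSERT INTO mydb.users(username,email) VALUES('jdoe','jdoe@example.com');
        return new JsonObject()
                .putString("columns", "username,email")
                .putString("values", "'jdoe','jdoe@example.com'");
    }
}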

File: Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/CqlMetaDaoImpl.java

/*******************************************************************************
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package com.netflix.staash.rest.dao;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.AlreadyExistsException;
import com.datastax.driver.core.exceptions.DriverException;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.staash.json.JsonArray;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.meta.entity.Entity;
import com.netflix.staash.rest.meta.entity.PaasDBEntity;
import com.netflix.staash.rest.meta.entity.PaasStorageEntity;
import com.netflix.staash.rest.meta.entity.PaasTableEntity;
import com.netflix.staash.rest.meta.entity.PaasTimeseriesEntity;
import com.netflix.staash.rest.util.MetaConstants;
import com.netflix.staash.rest.util.PaasUtils;
import com.netflix.staash.rest.util.Pair;
import com.netflix.staash.storage.service.MySqlService;
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;
public class CqlMetaDaoImpl implements MetaDao {
private Cluster cluster;
Session session;
private static boolean schemaCreated = false;
static final String metaks = "paasmetaks";
static final String metacf = "metacf";
private Set<String> dbHolder = new HashSet<String>();
private Map<String,List<String>> dbToTableMap = new HashMap<String,List<String>>();
private Map<String,List<String>> dbToTimeseriesMap = new HashMap<String,List<String>>();
private Map<String, String> tableToStorageMap = new HashMap<String, String>();
private JsonObject jsonStorage = new JsonObject();
@Inject
public CqlMetaDaoImpl(@Named("metacluster") Cluster cluster) {
// Cluster cluster = Cluster.builder().addContactPoint("localhost")
// .build();
this.cluster = cluster;
this.session = this.cluster.connect();
maybeCreateMetaSchema();
LoadDbNames();
LoadDbToTableMap();
LoadDbToTimeSeriesMap();
LoadStorage();
LoadTableToStorage();
}
private void LoadTableToStorage() {
ResultSet rs = session
.execute("select column1, value from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='"+MetaConstants.STAASH_TABLE_ENTITY_TYPE+"';");
List<Row> rows = rs.all();
for (Row row : rows) {
String field = row.getString(0);
JsonObject val = new JsonObject(row.getString(1));
String storage = val.getField("storage");
tableToStorageMap.put(field, storage);
}
}
public Map<String,JsonObject> LoadStorage() {
ResultSet rs = session
.execute("select column1, value from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='"+MetaConstants.STAASH_STORAGE_TYPE_ENTITY+"';");
List<Row> rows = rs.all();
Map<String,JsonObject> storageMap = new HashMap<String,JsonObject>();
for (Row row : rows) {
String field = row.getString(0);
JsonObject val = new JsonObject(row.getString(1));
jsonStorage.putObject(field, val);
storageMap.put(field, val);
}
return storageMap;
}
private void LoadDbNames() {
ResultSet rs = session
.execute("select column1 from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='com.test.entity.type.paas.db';");
List<Row> rows = rs.all();
for (Row row : rows) {
dbHolder.add(row.getString(0));
}
}
private void LoadDbToTableMap() {
ResultSet rs = session
.execute("select column1 from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='com.test.entity.type.paas.table';");
List<Row> rows = rs.all();
for (Row row : rows) {
String key = row.getString(0).split("\\.")[0];
String table = row.getString(0).split("\\.")[1];
List<String> currval = null;
currval = dbToTableMap.get(key);
if (currval == null) {
currval = new ArrayList<String>();
}
currval.add(table);
dbToTableMap.put(key, currval);
}
}
private void LoadDbToTimeSeriesMap() {
ResultSet rs = session
.execute("select column1 from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='com.test.entity.type.paas.timeseries';");
List<Row> rows = rs.all();
for (Row row : rows) {
String key = row.getString(0).split("\\.")[0];
String table = row.getString(0).split("\\.")[1];
List<String> currval = null;
currval = dbToTimeseriesMap.get(key);
if (currval == null) {
currval = new ArrayList<String>();
}
currval.add(table);
dbToTimeseriesMap.put(key, currval);
}
}
public String writeMetaEntityOnly(Entity entity) {
session.execute(String.format(PaasUtils.INSERT_FORMAT, MetaConstants.META_KEY_SPACE + "."
+ MetaConstants.META_COLUMN_FAMILY, entity.getRowKey(), entity.getName(),
entity.getPayLoad()));
return "ok";
}
public String writeMetaEntity(Entity entity) {
try {
if (dbHolder.contains(entity.getName())) {
JsonObject obj = new JsonObject(
"{\"status\":\"error\",\"message\":\"db names must be unique\"}");
return obj.toString();
}
session.execute(String.format(PaasUtils.INSERT_FORMAT, MetaConstants.META_KEY_SPACE + "."
+ MetaConstants.META_COLUMN_FAMILY, entity.getRowKey(), entity.getName(),
entity.getPayLoad()));
if (entity instanceof PaasDBEntity) dbHolder.add(entity.getName());
if (entity instanceof PaasStorageEntity) jsonStorage.putObject(entity.getName(), new JsonObject(entity.getPayLoad()));
} catch (AlreadyExistsException e) {
// It's ok, ignore
}
if (entity instanceof PaasTableEntity) {
// first create/check if schema db exists
PaasTableEntity tableEnt = (PaasTableEntity) entity;
String schemaName = tableEnt.getSchemaName();
String storage = tableEnt.getStorage();
try {
    if (storage != null && storage.contains("mysql")) {
        MySqlService.createDbInMySql(schemaName);
    }
    // Always create the counterpart keyspace in Cassandra as well.
    session.execute(String.format(
            PaasUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, schemaName, 1));
} catch (AlreadyExistsException e) {
// It's ok, ignore
}
// if schema/db already exists now create the table
try {
Thread.sleep(10);
} catch (InterruptedException e) {
    e.printStackTrace();
}
String query = BuildQuery(tableEnt);
Print(query);
if (storage!=null && storage.contains("mysql")) {
MySqlService.createTableInDb(schemaName, query);
} else {
storage="cassandra";
session.execute(query);
}
List<String> tables = dbToTableMap.get(tableEnt.getSchemaName());
if (tables == null) tables = new ArrayList<String>();
tables.add(tableEnt.getName());
// Put the list back so a newly created list is actually retained.
dbToTableMap.put(tableEnt.getSchemaName(), tables);
tableToStorageMap.put(tableEnt.getName(), storage);
// List<String> primaryKeys = entity.getPrimaryKey();
}
if (entity instanceof PaasTimeseriesEntity) {
// first create/check if schema db exists
PaasTimeseriesEntity tableEnt = (PaasTimeseriesEntity) entity;
try {
String schemaName = tableEnt.getSchemaName();
session.execute(String.format(
PaasUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, schemaName, 1));
} catch (AlreadyExistsException e) {
// It's ok, ignore
}
// if schema/db already exists now create the table
try {
Thread.sleep(10);
} catch (InterruptedException e) {
    e.printStackTrace();
}
String query = BuildQuery(tableEnt);
Print(query);
session.execute(query);
List<String> tables = dbToTimeseriesMap.get(tableEnt.getSchemaName());
if (tables == null) tables = new ArrayList<String>();
tables.add(tableEnt.getName().substring(tableEnt.getName().indexOf(".") + 1));
// Put the list back so a newly created list is actually retained.
dbToTimeseriesMap.put(tableEnt.getSchemaName(), tables);
// List<String> primaryKeys = entity.getPrimaryKey();
}
JsonObject obj = new JsonObject("{\"status\":\"ok\"}");
return obj.toString();
}
public String writeRow(String db, String table, JsonObject rowObj) {
String query = BuildRowInsertQuery(db, table, rowObj);
Print(query);
String storage = tableToStorageMap.get(db+"."+table);
if (storage!=null && storage.equals("mysql")) {
MySqlService.insertRowIntoTable(db, table, query);
} else {
session.execute(query);
}
JsonObject obj = new JsonObject("{\"status\":\"ok\"}");
return obj.toString();
}
private String BuildRowInsertQuery(String db, String table,
        JsonObject rowObj) {
String columns = rowObj.getString("columns");
String values = rowObj.getString("values");
String storage = tableToStorageMap.get(db+"."+table);
if (storage!=null && storage.equals("mysql")) {
return "INSERT INTO" + " " + table + "(" + columns + ")"
+ " VALUES(" + values + ");";
}else {
return "INSERT INTO" + " " + db + "." + table + "(" + columns + ")"
+ " VALUES(" + values + ");";
}
}
private void Print(String str) {
    System.out.println(str);
}
private String BuildQuery(PaasTableEntity tableEnt) {
    String storage = tableEnt.getStorage();
    String schema = tableEnt.getSchemaName();
    String tableName = tableEnt.getName().split("\\.")[1];
    List<Pair<String, String>> columns = tableEnt.getColumns();
    String colStrs = "";
    for (Pair<String, String> colPair : columns) {
        colStrs = colStrs + colPair.getRight() + " " + colPair.getLeft() + ", ";
    }
    String primarykeys = tableEnt.getPrimarykey();
    String PRIMARYSTR = "PRIMARY KEY(" + primarykeys + ")";
    // MySQL tables are created inside the db's own schema, so only the
    // Cassandra path qualifies the table name with the schema.
    if (storage != null && storage.equals("mysql")) {
        return "CREATE TABLE " + tableName + " (" + colStrs + " " + PRIMARYSTR + ");";
    } else {
        return "CREATE TABLE " + schema + "." + tableName + " (" + colStrs + " " + PRIMARYSTR + ");";
    }
}
private String BuildQuery(PaasTimeseriesEntity tableEnt) {
String schema = tableEnt.getSchemaName();
String tableName = tableEnt.getName().split("\\.")[1];
List<Pair<String, String>> columns = tableEnt.getColumns();
String colStrs = "";
for (Pair<String, String> colPair : columns) {
colStrs = colStrs + colPair.getRight() + " " + colPair.getLeft()
+ ", ";
}
String primarykeys = tableEnt.getPrimarykey();
String PRIMARYSTR = "PRIMARY KEY(" + primarykeys + ")";
return "CREATE TABLE " + schema + "." + tableName + " (" + colStrs
+ " " + PRIMARYSTR + ");";
}
    public void maybeCreateMetaSchema() {
        if (schemaCreated)
            return;
        try {
            session.execute(String.format(
                    PaasUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, metaks, 1));
        } catch (AlreadyExistsException e) {
            // It's ok, ignore
        }
        session.execute("USE " + metaks);
        for (String tableDef : getTableDefinitions()) {
            try {
                session.execute(tableDef);
            } catch (AlreadyExistsException e) {
                // It's ok, ignore
            }
        }
        schemaCreated = true;
    }
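    /**
     * Metadata lives in a single compact-storage column family, keyed by
     * entity type, with one (column1, value) pair per entity.
     */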
protected Collection<String> getTableDefinitions() {
String metaDynamic = "CREATE TABLE metacf (\n" + " key text,\n"
+ " column1 text,\n" + " value text,\n"
+ " PRIMARY KEY (key, column1)\n"
+ ") WITH COMPACT STORAGE;";
List<String> allDefs = new ArrayList<String>();
allDefs.add(metaDynamic);
return allDefs;
}
public Entity readMetaEntity(String rowKey) {
// TODO Auto-generated method stub
return null;
}
public String listRow(String db, String table, String keycol, String key) {
String query = select().all().from(db, table).where(eq(keycol, key))
.getQueryString();
ResultSet rs = session.execute(query);
return convertResultSet(rs);
}
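    /**
     * Flattens a single-row result set into a JSON object with "columns" and
     * "values" fields, relying on the driver's toString() formatting.
     */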
    private String convertResultSet(ResultSet rs) {
        String colStr = "";
        String rowStr = "";
        JsonObject response = new JsonObject();
        List<Row> rows = rs.all();
        // Note: the substring calls below assume exactly one row came back.
        if (rows.size() == 1) {
            rowStr = rows.get(0).toString();
        }
        ColumnDefinitions colDefs = rs.getColumnDefinitions();
        colStr = colDefs.toString();
        // Strip the "Columns[" prefix and trailing "]" from the driver output.
        response.putString("columns", colStr.substring(8, colStr.length() - 1));
        // Strip the "Row[" prefix and trailing "]".
        response.putString("values", rowStr.substring(4, rowStr.length() - 1));
        return response.toString();
    }
public String listSchemas() {
JsonObject obj = new JsonObject();
JsonArray arr = new JsonArray();
for (String db: dbHolder) {
arr.addString(db);
}
obj.putArray("schemas", arr);
return obj.toString();
}
    public String listTablesInSchema(String schemaname) {
        JsonObject obj = new JsonObject();
        JsonArray arr = new JsonArray();
        List<String> tblNames = dbToTableMap.get(schemaname);
        // Guard against schemas that have no tables registered yet.
        if (tblNames != null) {
            for (String name : tblNames) {
                arr.addString(name);
            }
        }
        obj.putArray(schemaname, arr);
        return obj.toString();
    }
    public String listTimeseriesInSchema(String schemaname) {
        JsonObject obj = new JsonObject();
        JsonArray arr = new JsonArray();
        List<String> tblNames = dbToTimeseriesMap.get(schemaname);
        // Guard against schemas that have no time series registered yet.
        if (tblNames != null) {
            for (String name : tblNames) {
                arr.addString(name);
            }
        }
        obj.putArray(schemaname, arr);
        return obj.toString();
    }
public String listStorage() {
return jsonStorage.toString();
}
public Map<String, String> getTableToStorageMap() {
return tableToStorageMap;
}
    public Map<String, String> getStorageMap() {
        // Returns the same table-to-storage mapping as getTableToStorageMap().
        return tableToStorageMap;
    }
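    /**
     * Reads the (column1, value) pairs stored under the given metadata row
     * key and column and returns them as a map of JSON payloads.
     */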
public Map<String, JsonObject> runQuery(String key, String col) {
ResultSet rs = session
.execute("select column1, value from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='"+key+"' and column1='"+col+"';");
List<Row> rows = rs.all();
Map<String,JsonObject> storageMap = new HashMap<String,JsonObject>();
for (Row row : rows) {
String field = row.getString(0);
JsonObject val = new JsonObject(row.getString(1));
storageMap.put(field, val);
}
return storageMap;
}
}
| 710 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/DataDao.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import com.netflix.staash.json.JsonObject;
public interface DataDao {
public String writeRow(String db, String table, JsonObject rowObj);
public String listRow(String db, String table, String keycol, String key);
public String writeEvent(String db, String table, JsonObject rowObj);
public String readEvent(String db, String table, String eventTime);
public String doJoin(String db, String table1, String table2,
String joincol, String value);
}
| 711 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/CqlMetaDaoImplNew.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.AlreadyExistsException;
import com.datastax.driver.core.exceptions.DriverException;
import com.netflix.staash.json.JsonArray;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.meta.entity.Entity;
import com.netflix.staash.rest.meta.entity.EntityType;
import com.netflix.staash.rest.util.MetaConstants;
import com.netflix.staash.rest.util.PaasUtils;
public class CqlMetaDaoImplNew implements MetaDao {
private Cluster cluster;
private Session session;
// List<String> dbHolder = new ArrayList<String>();
// Map<String, String> tableToStorageMap = new ConcurrentHashMap<String, String>();
// Map<String,JsonObject> storageMap = new ConcurrentHashMap<String,JsonObject>();
// Map<String, List<String>> dbToTableMap = new ConcurrentHashMap<String, List<String>>();
// Map<String, List<String>> dbToTimeseriesMap = new ConcurrentHashMap<String, List<String>>();
private boolean schemaCreated = false;
public CqlMetaDaoImplNew(Cluster cluster) {
this.cluster = cluster;
this.session = this.cluster.connect();
// LoadStorage();
// LoadDbNames();
// LoadDbToTableMap();
// LoadDbToTimeSeriesMap();
// LoadTableToStorage();
}
    private void maybeCreateMetaSchema() {
        if (schemaCreated)
            return;
        try {
            session.execute(String.format(
                    PaasUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, MetaConstants.META_KEY_SPACE, 1));
        } catch (AlreadyExistsException e) {
            // It's ok, ignore
        }
        session.execute("USE " + MetaConstants.META_KEY_SPACE);
        for (String tableDef : getTableDefinitions()) {
            try {
                session.execute(tableDef);
            } catch (AlreadyExistsException e) {
                // It's ok, ignore
            }
        }
        schemaCreated = true;
    }
protected Collection<String> getTableDefinitions() {
String metaDynamic = "CREATE TABLE metacf (\n" + " key text,\n"
+ " column1 text,\n" + " value text,\n"
+ " PRIMARY KEY (key, column1)\n"
+ ") WITH COMPACT STORAGE;";
List<String> allDefs = new ArrayList<String>();
allDefs.add(metaDynamic);
return allDefs;
}
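    /**
     * Persists a metadata entity as a single (rowKey, name, payload) column
     * in the metadata column family.
     */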
public String writeMetaEntity(Entity entity) {
session.execute(String.format(PaasUtils.INSERT_FORMAT, MetaConstants.META_KEY_SPACE + "."
+ MetaConstants.META_COLUMN_FAMILY, entity.getRowKey(), entity.getName(),
entity.getPayLoad()));
//addEntityToCache(entity.getRowKey(), entity);
        return "{\"msg\":\"ok\"}";
}
// public String listStorage() {
// Set<String> allStorage = storageMap.keySet();
// JsonObject obj = new JsonObject();
// JsonArray arr = new JsonArray();
// for (String storage: allStorage) {
// arr.addString(storage);
// }
// obj.putArray("storages", arr);
// return obj.toString();
// }
// public String listSchemas(){
// JsonObject obj = new JsonObject();
// JsonArray arr = new JsonArray();
// for (String db: dbHolder) {
// arr.addString(db);
// }
// obj.putArray("schemas", arr);
// return obj.toString();
// }
// public String listTablesInSchema(String db) {
// List<String> tables = dbToTableMap.get(db);
// JsonObject obj = new JsonObject();
// JsonArray arr = new JsonArray();
// for (String table: tables) {
// arr.addString(table);
// }
// obj.putArray(db, arr);
// return obj.toString();
// }
// public String listTimeseriesInSchema(String db) {
// List<String> tables = dbToTimeseriesMap.get(db);
// JsonObject obj = new JsonObject();
// JsonArray arr = new JsonArray();
// for (String table: tables) {
// arr.addString(table);
// }
// obj.putArray(db, arr);
// return obj.toString();
// }
// private void addEntityToCache(String rowkey, Entity entity) {
// switch (EntityType.valueOf(rowkey)) {
// case STORAGE:
// storageMap.put(entity.getName(), new JsonObject(entity.getPayLoad()));
// break;
// case DB:
// dbHolder.add(entity.getName());
// break;
// case TABLE:
// JsonObject payobject = new JsonObject(entity.getPayLoad());
// tableToStorageMap.put(entity.getName(), payobject.getString("storage"));
// String db = payobject.getString("db");
// List<String> tables = dbToTableMap.get(db);
// if (tables == null || tables.size() == 0) {
// tables = new ArrayList<String>();
// tables.add(entity.getName());
// } else {
// tables.add(entity.getName());
// }
// dbToTableMap.put(db, tables);
// break;
//
// case SERIES:
// JsonObject tsobject = new JsonObject(entity.getPayLoad());
// tableToStorageMap.put(entity.getName(), tsobject.getString("storage"));
// String dbname = tsobject.getString("db");
// List<String> alltables = dbToTableMap.get(dbname);
// if (alltables == null || alltables.size() == 0) {
// alltables = new ArrayList<String>();
// alltables.add(entity.getName());
// } else {
// alltables.add(entity.getName());
// }
// dbToTimeseriesMap.put(dbname, alltables);
// break;
// }
// }
// private void LoadTableToStorage() {
// ResultSet rs = session
// .execute("select column1, value from paasmetaks.metacf where key='"+MetaConstants.PAAS_TABLE_ENTITY_TYPE+"';");
// List<Row> rows = rs.all();
// for (Row row : rows) {
// String field = row.getString(0);
// JsonObject val = new JsonObject(row.getString(1));
// String storage = val.getField("storage");
// tableToStorageMap.put(field, storage);
// }
// }
// public Map<String,JsonObject> LoadStorage() {
// ResultSet rs = session
// .execute("select column1, value from paasmetaks.metacf where key='"+MetaConstants.PAAS_STORAGE_TYPE_ENTITY+"';");
// List<Row> rows = rs.all();
// for (Row row : rows) {
// String field = row.getString(0);
// JsonObject val = new JsonObject(row.getString(1));
// storageMap.put(field, val);
// }
// return storageMap;
// }
//
// private void LoadDbNames() {
// ResultSet rs = session
// .execute("select column1 from paasmetaks.metacf where key='com.test.entity.type.paas.db';");
// List<Row> rows = rs.all();
// for (Row row : rows) {
// dbHolder.add(row.getString(0));
// }
// }
// private void LoadDbToTableMap() {
// ResultSet rs = session
// .execute("select column1 from paasmetaks.metacf where key='com.test.entity.type.paas.table';");
// List<Row> rows = rs.all();
// for (Row row : rows) {
// String key = row.getString(0).split("\\.")[0];
// String table = row.getString(0).split("\\.")[1];
// List<String> currval = null;
// currval = dbToTableMap.get(key);
// if (currval == null) {
// currval = new ArrayList<String>();
// }
// currval.add(table);
// dbToTableMap.put(key, currval);
// }
// }
// private void LoadDbToTimeSeriesMap() {
// ResultSet rs = session
// .execute("select column1 from paasmetaks.metacf where key='com.test.entity.type.paas.timeseries';");
// List<Row> rows = rs.all();
// for (Row row : rows) {
// String key = row.getString(0).split("\\.")[0];
// String table = row.getString(0).split("\\.")[1];
// List<String> currval = null;
// currval = dbToTimeseriesMap.get(key);
// if (currval == null) {
// currval = new ArrayList<String>();
// }
// currval.add(table);
// dbToTimeseriesMap.put(key, currval);
// }
// }
// public Entity readMetaEntity(String rowKey) {
// // TODO Auto-generated method stub
// return null;
// }
//
// public String writeRow(String db, String table, JsonObject rowObj) {
// // TODO Auto-generated method stub
// return null;
// }
//
// public String listRow(String db, String table, String keycol, String key) {
// // TODO Auto-generated method stub
// return null;
// }
//
public Map<String, String> getStorageMap() {
// TODO Auto-generated method stub
return null;
}
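    /**
     * Reads metadata entries for the given row key; a column of "*" or null
     * selects every column in the row.
     */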
public Map<String, JsonObject> runQuery(String key, String col) {
ResultSet rs;
if (col!=null && !col.equals("*")) {
rs = session
.execute("select column1, value from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='"+key+"' and column1='"+col+"';");
}
else {
rs = session
.execute("select column1, value from "+MetaConstants.META_KEY_SPACE+"."+MetaConstants.META_COLUMN_FAMILY+ " where key='"+key+"';");
}
List<Row> rows = rs.all();
Map<String,JsonObject> storageMap = new HashMap<String,JsonObject>();
for (Row row : rows) {
String field = row.getString(0);
JsonObject val = new JsonObject(row.getString(1));
storageMap.put(field, val);
}
return storageMap;
}
}
| 712 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/MetaDao.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import java.util.Map;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.meta.entity.Entity;
public interface MetaDao {
public String writeMetaEntity(Entity entity);
public Map<String, JsonObject> runQuery(String key, String col);
}
| 713 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/AstyanaxDataDaoImpl.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import com.netflix.staash.json.JsonObject;
public class AstyanaxDataDaoImpl implements DataDao{
public String writeRow(String db, String table, JsonObject rowObj) {
// TODO Auto-generated method stub
return null;
}
public String listRow(String db, String table, String keycol, String key) {
// TODO Auto-generated method stub
return null;
}
public String writeEvent(String db, String table, JsonObject rowObj) {
// TODO Auto-generated method stub
return null;
}
public String readEvent(String db, String table, String keycol) {
// TODO Auto-generated method stub
return null;
}
public String doJoin(String db, String table1, String table2,
String joincol, String value) {
// TODO Auto-generated method stub
return null;
}
}
| 714 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/CqlDataDaoImplNew.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import com.datastax.driver.core.Cluster;
import com.netflix.staash.json.JsonObject;
public class CqlDataDaoImplNew extends CqlDataDaoImpl{
public CqlDataDaoImplNew(Cluster cluster, MetaDao meta) {
super(cluster, meta);
// TODO Auto-generated constructor stub
}
public String writeRow(String db, String table, JsonObject rowObj) {
// TODO Auto-generated method stub
return null;
}
public String listRow(String db, String table, String keycol, String key) {
// TODO Auto-generated method stub
return null;
}
public String writeEvent(String db, String table, JsonObject rowObj) {
// TODO Auto-generated method stub
return null;
}
public String readEvent(String db, String table, String eventTime) {
// TODO Auto-generated method stub
return null;
}
public String doJoin(String db, String table1, String table2,
String joincol, String value) {
// TODO Auto-generated method stub
return null;
}
}
| 715 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/AstyanaxMetaDaoImpl.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.Logger;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicStringProperty;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.meta.entity.Entity;
import com.netflix.staash.rest.util.MetaConstants;
import com.netflix.staash.rest.util.PaasUtils;
import com.netflix.staash.rest.util.StaashRequestContext;
public class AstyanaxMetaDaoImpl implements MetaDao {
private Keyspace keyspace;
private Logger logger = Logger.getLogger(AstyanaxMetaDaoImpl.class);
private static final DynamicStringProperty METASTRATEGY = DynamicPropertyFactory
.getInstance().getStringProperty("staash.metastrategy",
"NetworkTopologyStrategy");
private static final DynamicStringProperty METARF = DynamicPropertyFactory
.getInstance().getStringProperty("staash.metareplicationfactor",
"us-east:3");
static ColumnFamily<String, String> METACF = ColumnFamily.newColumnFamily(
MetaConstants.META_COLUMN_FAMILY, StringSerializer.get(), StringSerializer.get());
@Inject
public AstyanaxMetaDaoImpl(@Named("astmetaks") Keyspace keyspace) {
this.keyspace = keyspace;
try {
keyspace.describeKeyspace();
logger.info("keyspaces for staash exists already");
StaashRequestContext.addContext("Meta_Init",
"keyspace already existed");
} catch (ConnectionException ex) {
StaashRequestContext.addContext("Meta_Init",
"Keyspace did not exist , creating keyspace "+MetaConstants.META_KEY_SPACE
);
maybecreateschema();
}
}
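    /**
     * Parses the staash.metareplicationfactor property (e.g. "us-east:3")
     * into a strategy_options map of datacenter name to replication factor.
     */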
private Map<String, Object> populateMap() {
Builder<String, Object> rfMap = ImmutableMap.<String, Object> builder();
String rfStr = METARF.getValue();
String[] pairs = rfStr.split(",");
for (String pair : pairs) {
String[] kv = pair.split(":");
rfMap.put(kv[0], kv[1]);
}
return rfMap.build();
}
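    /**
     * Creates the metadata keyspace and column family if they are missing.
     * A connection pool containing localhost:9160 is treated as a local
     * deployment and gets SimpleStrategy with a replication factor of 1;
     * otherwise the configured strategy and per-datacenter factors are used.
     */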
private void maybecreateschema() {
try {
            boolean isLocal = true;
            logger.info("Strategy: " + METASTRATEGY.getValue() + " RF: "
                    + METARF.getValue());
            try {
                isLocal = keyspace.getConnectionPool().hasHost(
                        new Host("localhost:9160", 9160));
            } catch (ConnectionException ex) {
                // Could not inspect the pool; fall back to the local default.
            }
            if (isLocal) {
keyspace.createKeyspace(ImmutableMap
.<String, Object> builder()
.put("strategy_options",
ImmutableMap.<String, Object> builder()
.put("replication_factor", "1").build())
.put("strategy_class", "SimpleStrategy").build());
} else {
// keyspace.createKeyspace(ImmutableMap
// .<String, Object> builder()
// .put("strategy_options",
// ImmutableMap.<String, Object> builder()
// .put("us-east", "3").build())
// .put("strategy_class", METASTRATEGY).build());
keyspace.createKeyspace(ImmutableMap.<String, Object> builder()
.put("strategy_options", populateMap())
.put("strategy_class", METASTRATEGY.getValue()).build());
}
StaashRequestContext.addContext("Meta_Init",
"Keyspace did not exist , created keyspace "+MetaConstants.META_KEY_SPACE +" with rf:"
+ METARF.getValue());
} catch (ConnectionException e) {
// If we are here that means the meta artifacts already exist
logger.info("keyspaces for staash exists already");
StaashRequestContext.addContext("Meta_Init",
"keyspace already existed");
}
try {
String metaDynamic = "CREATE TABLE " + MetaConstants.META_COLUMN_FAMILY +"(\n" + " key text,\n"
+ " column1 text,\n" + " value text,\n"
+ " PRIMARY KEY (key, column1)\n"
+ ") WITH COMPACT STORAGE;";
keyspace.prepareCqlStatement().withCql(metaDynamic).execute();
StaashRequestContext
.addContext("Meta_Init",
"Columnfamily did not exist , created column family "+MetaConstants.META_COLUMN_FAMILY + " in keyspace "+MetaConstants.META_KEY_SPACE);
} catch (ConnectionException e) {
// if we are here means meta artifacts already exists, ignore
logger.info("staash column family exists");
StaashRequestContext.addContext("Meta_Init",
"Columnfamily already existed");
}
}
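    /**
     * Persists a metadata entity as one (rowKey, name, payload) column and
     * records the outcome in the request context.
     */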
public String writeMetaEntity(Entity entity) {
try {
String stmt = String.format(PaasUtils.INSERT_FORMAT, MetaConstants.META_KEY_SPACE
+ "." + MetaConstants.META_COLUMN_FAMILY,
entity.getRowKey(), entity.getName(), entity.getPayLoad());
keyspace.prepareCqlStatement().withCql(stmt).execute();
            StaashRequestContext.addContext("Meta_Write", "write succeeded on meta: " + (entity != null ? entity.getPayLoad() : null));
} catch (ConnectionException e) {
            logger.info("Write of the entity failed " + (entity != null ? entity.getPayLoad() : null));
            StaashRequestContext.addContext("Meta_Write", "write failed on meta: " + (entity != null ? entity.getPayLoad() : null));
throw new RuntimeException(e.getMessage());
}
return "{\"msg\":\"ok\"}";
}
public Map<String, String> getStorageMap() {
return null;
}
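    /**
     * Reads metadata entries under the given row key via CQL; a column of
     * "*" or null selects the whole row. Results are keyed by column1 with
     * each value parsed as JSON.
     */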
public Map<String, JsonObject> runQuery(String key, String col) {
OperationResult<CqlStatementResult> rs;
Map<String, JsonObject> resultMap = new HashMap<String, JsonObject>();
try {
String queryStr = "";
if (col != null && !col.equals("*")) {
queryStr = "select column1, value from "+MetaConstants.META_KEY_SPACE + "." + MetaConstants.META_COLUMN_FAMILY +" where key='"
+ key + "' and column1='" + col + "';";
} else {
queryStr = "select column1, value from "+MetaConstants.META_KEY_SPACE + "." + MetaConstants.META_COLUMN_FAMILY +" where key='"
+ key + "';";
}
rs = keyspace.prepareCqlStatement().withCql(queryStr).execute();
for (Row<String, String> row : rs.getResult().getRows(METACF)) {
ColumnList<String> columns = row.getColumns();
String key1 = columns.getStringValue("column1", null);
String val1 = columns.getStringValue("value", null);
resultMap.put(key1, new JsonObject(val1));
}
} catch (ConnectionException e) {
e.printStackTrace();
throw new RuntimeException(e.getMessage());
}
return resultMap;
}
}
| 716 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/dao/MysqlDaoImpl.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.dao;
import com.netflix.staash.json.JsonObject;
public class MysqlDaoImpl {
public MysqlDaoImpl() {
}
public String writeRow(String db, String table, JsonObject rowObj) {
// TODO Auto-generated method stub
return null;
}
public String listRow(String db, String table, String keycol, String key) {
// TODO Auto-generated method stub
return null;
}
public String writeEvent(String db, String table, JsonObject rowObj) {
// TODO Auto-generated method stub
return null;
}
public String readEvent(String db, String table, String eventTime) {
// TODO Auto-generated method stub
return null;
}
public void createTable() {
}
}
| 717 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta/entity/PaasDBEntity.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.meta.entity;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.util.MetaConstants;
public class PaasDBEntity extends Entity{
public static class Builder {
private PaasDBEntity entity = new PaasDBEntity();
public Builder withJsonPayLoad(JsonObject payLoad) {
entity.setRowKey(MetaConstants.STAASH_DB_ENTITY_TYPE);
String payLoadName = payLoad.getString("name");
String load = payLoad.toString();
entity.setName(payLoadName);
entity.setPayLoad(load);
return this;
}
public PaasDBEntity build() {
return entity;
}
}
public static Builder builder() {
return new Builder();
}
}
| 718 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta/entity/PaasTableEntity.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.meta.entity;
import java.util.ArrayList;
import java.util.List;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.util.MetaConstants;
import com.netflix.staash.rest.util.Pair;
public class PaasTableEntity extends Entity{
private String schemaName;
private List<Pair<String, String>> columns = new ArrayList<Pair<String, String>>();
private String primarykey;
private String storage;
public static class Builder {
private PaasTableEntity entity = new PaasTableEntity();
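        /**
         * Expects a payload with "name", "columns", "storage" and
         * "primarykey" fields. "columns" is a comma-separated list of column
         * specs, each either "name:type" or a bare name, which defaults to
         * text (varchar(256) when the storage is mysql).
         */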
public Builder withJsonPayLoad(JsonObject payLoad, String schemaName) {
entity.setRowKey(MetaConstants.STAASH_TABLE_ENTITY_TYPE);
entity.setSchemaName(schemaName);
String payLoadName = payLoad.getString("name");
String load = payLoad.toString();
entity.setName(schemaName+"."+payLoadName);
String columnswithtypes = payLoad.getString("columns");
String[] allCols = columnswithtypes.split(",");
String storage = payLoad.getString("storage");
for (String col:allCols) {
String type;
String name;
if (!col.contains(":")) {
if (storage!=null && storage.contains("mysql")) type = "varchar(256)";
else type="text";
name=col;
}
else {
name = col.split(":")[0];
type = col.split(":")[1];
}
Pair<String, String> p = new Pair<String, String>(type, name);
entity.addColumn(p);
}
entity.setPrimarykey(payLoad.getString("primarykey"));
entity.setStorage(storage);
entity.setPayLoad(load);
return this;
}
public PaasTableEntity build() {
return entity;
}
}
public static Builder builder() {
return new Builder();
}
public String getSchemaName() {
return schemaName;
}
private void setSchemaName(String schemaname) {
this.schemaName = schemaname;
}
private void addColumn(Pair<String, String> pair) {
columns.add(pair);
}
public List<Pair<String,String>> getColumns() {
return columns;
}
public String getPrimarykey() {
return primarykey;
}
private void setPrimarykey(String primarykey) {
this.primarykey = primarykey;
}
private void setStorage(String storagename) {
this.storage = storagename;
}
public String getStorage() {
return storage;
}
}
| 719 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta/entity/EntityType.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.meta.entity;
import com.netflix.staash.rest.util.MetaConstants;
public enum EntityType {
    STORAGE(MetaConstants.STAASH_STORAGE_TYPE_ENTITY),
    DB(MetaConstants.STAASH_DB_ENTITY_TYPE),
    TABLE(MetaConstants.STAASH_TABLE_ENTITY_TYPE),
    SERIES(MetaConstants.STAASH_TS_ENTITY_TYPE);
private String id;
EntityType(String id) {
this.id = id;
}
public String getId(){
return id;
}
}
| 720 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta/entity/PaasStorageEntity.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.meta.entity;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.util.MetaConstants;
public class PaasStorageEntity extends Entity{
public static class Builder {
private PaasStorageEntity entity = new PaasStorageEntity();
public Builder withJsonPayLoad(JsonObject payLoad) {
entity.setRowKey(MetaConstants.STAASH_STORAGE_TYPE_ENTITY);
String payLoadName = payLoad.getString("name");
String load = payLoad.toString();
entity.setName(payLoadName);
entity.setPayLoad(load);
return this;
}
public PaasStorageEntity build() {
return entity;
}
}
public static Builder builder() {
return new Builder();
}
}
| 721 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta/entity/PaasTimeseriesEntity.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.meta.entity;
import java.util.ArrayList;
import java.util.List;
import com.netflix.staash.json.JsonObject;
import com.netflix.staash.rest.util.MetaConstants;
import com.netflix.staash.rest.util.Pair;
public class PaasTimeseriesEntity extends Entity{
private String schemaName;
private List<Pair<String, String>> columns = new ArrayList<Pair<String, String>>();
private String primarykey;
public static class Builder {
private PaasTimeseriesEntity entity = new PaasTimeseriesEntity();
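        /**
         * A time series always gets the fixed (key, column1, value) layout:
         * the key column is a timestamp for periodic series and text
         * otherwise, with a compound primary key of (key, column1).
         */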
public Builder withJsonPayLoad(JsonObject payLoad, String schemaName) {
entity.setRowKey(MetaConstants.STAASH_TS_ENTITY_TYPE);
entity.setSchemaName(schemaName);
String payLoadName = payLoad.getString("name");
entity.setName(schemaName+"."+payLoadName);
String type = payLoad.getString("seriestype");
Pair<String, String> keycol;
String columns = "";
if (type!=null && type.equals(MetaConstants.PERIOD_TIME_SERIES))
keycol = new Pair<String, String>("timestamp", "key");
else keycol = new Pair<String, String>("text", "key");
Pair<String, String> eventcol = new Pair<String, String>("timestamp", "column1");
Pair<String, String> valuecol = new Pair<String, String>("text", "value");
columns = keycol.getRight()+":"+keycol.getLeft()+","+eventcol.getRight()+":"+eventcol.getLeft()+","+valuecol.getRight()+":"+valuecol.getLeft();
entity.addColumn(keycol);
entity.addColumn(eventcol);
entity.addColumn(valuecol);
entity.setPrimarykey("key,column1");
payLoad.putString("columns", columns);
payLoad.putString("primarykey", entity.getPrimarykey());
String load = payLoad.toString();
entity.setPayLoad(load);
return this;
}
public PaasTimeseriesEntity build() {
return entity;
}
}
public static Builder builder() {
return new Builder();
}
public String getSchemaName() {
return schemaName;
}
private void setSchemaName(String schemaname) {
this.schemaName = schemaname;
}
private void addColumn(Pair<String, String> pair) {
columns.add(pair);
}
public List<Pair<String,String>> getColumns() {
return columns;
}
public String getPrimarykey() {
return primarykey;
}
private void setPrimarykey(String primarykey) {
this.primarykey = primarykey;
}
}
| 722 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/meta/entity/Entity.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.meta.entity;
public class Entity {
protected String rowKey;
protected String name;
protected String payLoad;
public String getRowKey() {
return rowKey;
}
public String getName() {
return name;
}
protected void setRowKey(String rowkey) {
this.rowKey = rowkey;
}
protected void setName(String name) {
this.name = name;
}
public String getPayLoad() {
return payLoad;
}
protected void setPayLoad(String payLoad) {
this.payLoad = payLoad;
}
}
| 723 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/main/PaasMain.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.main;
public class PaasMain {
/**
* @param args
*/
public static void main(String[] args) {
// TODO Auto-generated method stub
}
}
| 724 |
0 |
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest
|
Create_ds/staash/staash-svc/src/main/java/com/netflix/staash/rest/modules/PaasPropertiesModule.java
|
/*******************************************************************************
* /*
* *
* * Copyright 2013 Netflix, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* *
******************************************************************************/
package com.netflix.staash.rest.modules;
import java.net.URL;
import java.util.Properties;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.google.inject.name.Names;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolType;
import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
import com.netflix.astyanax.thrift.ThriftFamilyFactory;
import com.netflix.staash.cassandra.discovery.EurekaAstyanaxHostSupplier;
import com.netflix.staash.connection.ConnectionFactory;
import com.netflix.staash.connection.PaasConnectionFactory;
import com.netflix.staash.rest.dao.AstyanaxDataDaoImpl;
import com.netflix.staash.rest.dao.AstyanaxMetaDaoImpl;
import com.netflix.staash.rest.dao.CqlDataDaoImpl;
import com.netflix.staash.rest.dao.CqlMetaDaoImpl;
import com.netflix.staash.rest.dao.CqlMetaDaoImplNew;
import com.netflix.staash.rest.dao.DataDao;
import com.netflix.staash.rest.dao.MetaDao;
import com.netflix.staash.rest.util.HostSupplier;
import com.netflix.staash.rest.util.MetaConstants;
import com.netflix.staash.service.CacheService;
import com.netflix.staash.service.DataService;
import com.netflix.staash.service.MetaService;
import com.netflix.staash.service.PaasDataService;
import com.netflix.staash.service.PaasMetaService;
public class PaasPropertiesModule extends AbstractModule {
@Override
protected void configure() {
try {
Properties props = loadProperties();
Names.bindProperties(binder(), props);
} catch (Exception e) {
e.printStackTrace();
}
}
private static Properties loadProperties() throws Exception {
Properties properties = new Properties();
ClassLoader loader = PaasPropertiesModule.class.getClassLoader();
URL url = loader.getResource("staash.properties");
properties.load(url.openStream());
return properties;
}
@Provides
@Named("metacluster")
Cluster provideCluster(@Named("staash.cassclient") String clientType,@Named("staash.metacluster") String clustername) {
if (clientType.equals("cql")) {
Cluster cluster = Cluster.builder().addContactPoint(clustername).build();
return cluster;
} else return null;
}
@Provides
HostSupplier provideHostSupplier(@Named("staash.metacluster") String clustername) {
return null;
}
@Provides
@Named("astmetaks")
Keyspace provideKeyspace(@Named("staash.metacluster") String clustername,EurekaAstyanaxHostSupplier hs) {
String clusterNameOnly = "";
String[] clusterinfo = clustername.split(":");
if (clusterinfo != null && clusterinfo.length == 2) {
clusterNameOnly = clusterinfo[0];
} else {
clusterNameOnly = clustername;
}
AstyanaxContext<Keyspace> keyspaceContext = new AstyanaxContext.Builder()
.forCluster(clusterNameOnly)
.forKeyspace(MetaConstants.META_KEY_SPACE)
.withAstyanaxConfiguration(
new AstyanaxConfigurationImpl()
.setDiscoveryType(
NodeDiscoveryType.RING_DESCRIBE)
.setConnectionPoolType(
ConnectionPoolType.TOKEN_AWARE)
.setDiscoveryDelayInSeconds(60)
.setTargetCassandraVersion("1.2")
.setCqlVersion("3.0.0"))
.withHostSupplier(hs.getSupplier(clustername))
.withConnectionPoolConfiguration(
new ConnectionPoolConfigurationImpl(clusterNameOnly
+ "_" + MetaConstants.META_KEY_SPACE)
.setSocketTimeout(11000)
.setConnectTimeout(2000)
.setMaxConnsPerHost(10).setInitConnsPerHost(3))
.withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
.buildKeyspace(ThriftFamilyFactory.getInstance());
keyspaceContext.start();
Keyspace keyspace;
keyspace = keyspaceContext.getClient();
return keyspace;
}
@Provides
@Named("datacluster")
Cluster provideDataCluster(@Named("staash.datacluster") String clustername) {
Cluster cluster = Cluster.builder().addContactPoint(clustername).build();
return cluster;
}
@Provides
MetaDao provideCqlMetaDao(@Named("staash.cassclient") String clientType, @Named("metacluster") Cluster cluster,@Named("astmetaks") Keyspace keyspace) {
if (clientType.equals("cql"))
return new CqlMetaDaoImpl(cluster );
else return new AstyanaxMetaDaoImpl(keyspace);
}
@Provides
DataDao provideCqlDataDao(@Named("staash.cassclient") String clientType, @Named("datacluster") Cluster cluster, MetaDao meta) {
if (clientType.equals("cql"))
return new CqlDataDaoImpl(cluster, meta);
else return new AstyanaxDataDaoImpl();
}
@Provides
@Named("pooledmetacluster")
Cluster providePooledCluster(@Named("staash.cassclient") String clientType,@Named("staash.metacluster") String clustername) {
if (clientType.equals("cql")) {
Cluster cluster = Cluster.builder().withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())).addContactPoint(clustername).build();
return cluster;
}else {
return null;
}
}
@Provides
@Named("newmetadao")
MetaDao provideCqlMetaDaoNew(@Named("staash.cassclient") String clientType, @Named("metacluster") Cluster cluster, @Named("astmetaks") Keyspace keyspace) {
if (clientType.equals("cql"))
return new CqlMetaDaoImplNew(cluster );
else return new AstyanaxMetaDaoImpl(keyspace);
}
@Provides
MetaService providePaasMetaService(@Named("newmetadao") MetaDao metad, ConnectionFactory fac, CacheService cache) {
PaasMetaService metasvc = new PaasMetaService(metad, fac, cache);
return metasvc;
}
@Provides
DataService providePaasDataService( MetaService metasvc, ConnectionFactory fac) {
PaasDataService datasvc = new PaasDataService(metasvc, fac);
return datasvc;
}
@Provides
CacheService provideCacheService(@Named("newmetadao") MetaDao metad) {
return new CacheService(metad);
}
@Provides
ConnectionFactory provideConnectionFactory(@Named("staash.cassclient") String clientType,EurekaAstyanaxHostSupplier hs) {
return new PaasConnectionFactory(clientType, hs);
}
}
| 725 |
0 |
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache/sample/EVCacheClientZipkinTracingSample.java
|
package com.netflix.evcache.sample;
import brave.Tracing;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.EVCacheTracingEventListener;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import zipkin2.Span;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
public class EVCacheClientZipkinTracingSample {
private final EVCache evCache;
private final List<Span> reportedSpans;
private static boolean verboseMode = false;
/**
* Default constructor.
*
* <p>This tells the EVCache library to use the "simple node list provider" for EVCACHE_APP1 (by
* setting the relevant system property), and then it copies the EVC_SAMPLE_DEPLOYMENT environment
* variable to the EVCACHE_APP1-NODES system property.
*
* <p>If the environment variable isn't set, default memcached server is at localhost:11211.
*
* <p>Finally, this initializes "evCache" using EVCache.Builder, specifying the application name
* "EVCACHE_APP1."
*/
public EVCacheClientZipkinTracingSample() {
String deploymentDescriptor = System.getenv("EVC_SAMPLE_DEPLOYMENT");
if (deploymentDescriptor == null) {
      // No deployment descriptor in the environment, use a default.
deploymentDescriptor = "SERVERGROUP1=localhost:11211";
}
System.setProperty("EVCACHE_APP1.use.simple.node.list.provider", "true");
System.setProperty("EVCACHE_APP1-NODES", deploymentDescriptor);
EVCacheClientPoolManager poolManager = EVCacheClientPoolManager.getInstance();
poolManager.initEVCache("EVCACHE_APP1");
reportedSpans = new ArrayList<>();
Tracing tracing = Tracing.newBuilder().spanReporter(reportedSpans::add).build();
EVCacheTracingEventListener tracingEventListener =
new EVCacheTracingEventListener(poolManager, tracing.tracer());
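    // The tracing listener is assumed to register itself with the pool
    // manager during construction, which is why the local reference is not
    // used again.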
evCache = new EVCache.Builder().setAppName("EVCACHE_APP1").build();
}
/**
* Set a key in the cache.
*
* <p>See the memcached documentation for what "timeToLive" means. Zero means "never expires."
* Small integers (under some threshold) mean "expires this many seconds from now." Large integers
* mean "expires at this Unix timestamp" (seconds since 1/1/1970). Warranty expires 17-Jan 2038.
*/
public void setKey(String key, String value, int timeToLive) throws Exception {
try {
Future<Boolean>[] _future = evCache.set(key, value, timeToLive);
// Wait for all the Futures to complete.
// In "verbose" mode, show the status for each.
for (Future<Boolean> f : _future) {
boolean didSucceed = f.get();
if (verboseMode) {
System.out.println("per-shard set success code for key " + key + " is " + didSucceed);
}
}
if (!verboseMode) {
// Not verbose. Just give one line of output per "set," without a success code
System.out.println("finished setting key " + key);
}
} catch (EVCacheException e) {
e.printStackTrace();
}
}
/**
* Get the data for a key from the cache. Returns null if the key could not be retrieved, whether
* due to a cache miss or errors.
*/
public String getKey(String key) {
try {
String _response = evCache.<String>get(key);
return _response;
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
public void printZipkinSpans() {
System.out.println("--> " + reportedSpans.toString());
}
/** Main Program which does some simple sets and gets. */
public static void main(String[] args) {
// set verboseMode based on the environment variable
verboseMode = ("true".equals(System.getenv("EVCACHE_SAMPLE_VERBOSE")));
if (verboseMode) {
System.out.println("To run this sample app without using Gradle:");
System.out.println(
"java -cp "
+ System.getProperty("java.class.path")
+ " com.netflix.evcache.sample.EVCacheClientZipkinTracingSample");
}
try {
EVCacheClientZipkinTracingSample evCacheClientZipkinTracingSample =
new EVCacheClientZipkinTracingSample();
// Set ten keys to different values
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = "data_" + i;
// Set the TTL to 24 hours
int ttl = 86400;
evCacheClientZipkinTracingSample.setKey(key, value, ttl);
}
// Do a "get" for each of those same keys
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = evCacheClientZipkinTracingSample.getKey(key);
System.out.println("Get of " + key + " returned " + value);
}
// Print collected Zipkin Spans
evCacheClientZipkinTracingSample.printZipkinSpans();
} catch (Exception e) {
e.printStackTrace();
}
// We have to call System.exit() now, because some background
// threads were started without the "daemon" flag. This is
// probably a mistake somewhere, but hey, this is only a sample app.
System.exit(0);
}
}
| 726 |
0 |
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-client-sample/src/main/java/com/netflix/evcache/sample/EVCacheClientSample.java
|
package com.netflix.evcache.sample;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import java.util.concurrent.Future;
/**
* Created by senugula on 3/24/16.
* Updated by akpratt on 5/13/16.
*/
/**
* This standalone program demonstrates how to use EVCacheClient for
* set/get operations using memcached running on your local box.
*
* By default, this program expects there to be two memcached processes
* on the local host, on ports 11211 and 11212. They get used as two
* replicas of a single shard each.
*
* You can override this configuration by setting the environment
* variable EVC_SAMPLE_DEPLOYMENT to a string which describes your
* deployment. The format for that string is as described in the EVCache
* documentation for a simple node list provider. It would look like
* this for a two-replica deployment with two shards per replica:
*
* SERVERGROUP1=host1:port1,host2:port2;SERVERGROUP2=host3:port3,host4:port4
*/
public class EVCacheClientSample {
private final EVCache evCache;
private static boolean verboseMode = false;
/**
* Default constructor.
*
* This tells the EVCache library to use the "simple node list
* provider" for EVCACHE_APP1 (by setting the relevant system
* property), and then it copies the EVC_SAMPLE_DEPLOYMENT
* environment variable to the EVCACHE_APP1-NODES system property.
*
* If the environment variable isn't set, default is two shards on
* localhost, on port 11211 and 11212, configured as two replicas with
* one shard each.
*
* Finally, this initializes "evCache" using EVCache.Builder,
* specifying the application name "EVCACHE_APP1."
*/
public EVCacheClientSample() {
String deploymentDescriptor = System.getenv("EVC_SAMPLE_DEPLOYMENT");
if (deploymentDescriptor == null) {
// No deployment descriptor in the environment, use a default: two local
// memcached processes configured as two replicas of one shard each.
deploymentDescriptor = "SERVERGROUP1=localhost:11211;SERVERGROUP2=localhost:11212";
}
System.setProperty("EVCACHE_APP1.use.simple.node.list.provider", "true");
System.setProperty("EVCACHE_APP1-NODES", deploymentDescriptor);
evCache = new EVCache.Builder().setAppName("EVCACHE_APP1").build();
}
/**
* Set a key in the cache.
*
* See the memcached documentation for what "timeToLive" means.
* Zero means "never expires."
* Small integers (under some threshold) mean "expires this many seconds from now."
* Large integers mean "expires at this Unix timestamp" (seconds since 1/1/1970).
* Warranty expires 17-Jan 2038.
*/
public void setKey(String key, String value, int timeToLive) throws Exception {
try {
Future<Boolean>[] _future = evCache.set(key, value, timeToLive);
// Wait for all the Futures to complete.
// In "verbose" mode, show the status for each.
for (Future<Boolean> f : _future) {
boolean didSucceed = f.get();
if (verboseMode) {
System.out.println("per-shard set success code for key " + key + " is " + didSucceed);
}
}
if (!verboseMode) {
// Not verbose. Just give one line of output per "set," without a success code
System.out.println("finished setting key " + key);
}
} catch (EVCacheException e) {
e.printStackTrace();
}
}
/**
* Get the data for a key from the cache. Returns null if the key
* could not be retrieved, whether due to a cache miss or errors.
*/
public String getKey(String key) {
try {
String _response = evCache.<String>get(key);
return _response;
} catch (Exception e) {
e.printStackTrace();
return null;
}
}
/**
* Main Program which does some simple sets and gets.
*/
public static void main(String[] args) {
// set verboseMode based on the environment variable
verboseMode = ("true".equals(System.getenv("EVCACHE_SAMPLE_VERBOSE")));
if (verboseMode) {
System.out.println("To run this sample app without using Gradle:");
System.out.println("java -cp " + System.getProperty("java.class.path") + " com.netflix.evcache.sample.EVCacheClientSample");
}
try {
EVCacheClientSample evCacheClientSample = new EVCacheClientSample();
// Set ten keys to different values
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = "data_" + i;
// Set the TTL to 24 hours
int ttl = 86400;
evCacheClientSample.setKey(key, value, ttl);
}
// Do a "get" for each of those same keys
for (int i = 0; i < 10; i++) {
String key = "key_" + i;
String value = evCacheClientSample.getKey(key);
System.out.println("Get of " + key + " returned " + value);
}
} catch (Exception e) {
e.printStackTrace();
}
// We have to call System.exit() now, because some background
// threads were started without the "daemon" flag. This is
// probably a mistake somewhere, but hey, this is only a sample app.
System.exit(0);
}
}
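// To run the sample outside Gradle (a usage note; the classpath is
// environment-specific), use the command the app itself prints in verbose mode:
//   java -cp <classpath> com.netflix.evcache.sample.EVCacheClientSample
// Set EVCACHE_SAMPLE_VERBOSE=true to also print the per-shard set status.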
| 727 |
0 |
Create_ds/EVCache/evcache-zipkin-tracing/src/test/java/com/netflix
|
Create_ds/EVCache/evcache-zipkin-tracing/src/test/java/com/netflix/evcache/EVCacheTracingEventListenerUnitTests.java
|
package com.netflix.evcache;
import brave.Tracing;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import zipkin2.Span;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.*;
public class EVCacheTracingEventListenerUnitTests {
List<zipkin2.Span> reportedSpans;
EVCacheTracingEventListener tracingListener;
EVCacheClient mockEVCacheClient;
EVCacheEvent mockEVCacheEvent;
@BeforeMethod
public void resetMocks() {
mockEVCacheClient = mock(EVCacheClient.class);
when(mockEVCacheClient.getServerGroupName()).thenReturn("dummyServerGroupName");
mockEVCacheEvent = mock(EVCacheEvent.class);
when(mockEVCacheEvent.getClients()).thenReturn(Arrays.asList(mockEVCacheClient));
when(mockEVCacheEvent.getCall()).thenReturn(EVCache.Call.GET);
when(mockEVCacheEvent.getAppName()).thenReturn("dummyAppName");
when(mockEVCacheEvent.getCacheName()).thenReturn("dummyCacheName");
when(mockEVCacheEvent.getEVCacheKeys())
.thenReturn(Arrays.asList(new EVCacheKey("dummyAppName", "dummyKey", "dummyCanonicalKey", null, null, null, null)));
when(mockEVCacheEvent.getStatus()).thenReturn("success");
when(mockEVCacheEvent.getDurationInMillis()).thenReturn(1L);
when(mockEVCacheEvent.getTTL()).thenReturn(0);
when(mockEVCacheEvent.getCachedData())
.thenReturn(new CachedData(1, "dummyData".getBytes(), 255));
Map<String, Object> eventAttributes = new HashMap<>();
doAnswer(
new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] arguments = invocation.getArguments();
String key = (String) arguments[0];
Object value = arguments[1];
eventAttributes.put(key, value);
return null;
}
})
.when(mockEVCacheEvent)
.setAttribute(any(), any());
doAnswer(
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] arguments = invocation.getArguments();
String key = (String) arguments[0];
return eventAttributes.get(key);
}
})
.when(mockEVCacheEvent)
.getAttribute(any());
reportedSpans = new ArrayList<>();
Tracing tracing = Tracing.newBuilder().spanReporter(reportedSpans::add).build();
tracingListener =
new EVCacheTracingEventListener(mock(EVCacheClientPoolManager.class), tracing.tracer());
}
  public void verifyCommonTags(List<zipkin2.Span> spans) {
    Assert.assertEquals(spans.size(), 1, "Unexpected number of reported spans");
    zipkin2.Span span = spans.get(0);
    Assert.assertEquals(span.kind(), Span.Kind.CLIENT, "Span kind does not match");
    Assert.assertEquals(
        span.name(), EVCacheTracingEventListener.EVCACHE_SPAN_NAME, "Span name does not match");
Map<String, String> tags = span.tags();
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.APP_NAME), "APP_NAME tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.CACHE_NAME_PREFIX), "CACHE_NAME_PREFIX tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.CALL), "CALL tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.SERVER_GROUPS), "SERVER_GROUPS tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.CANONICAL_KEYS), "CANONICAL_KEYS tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.STATUS), "STATUS tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.LATENCY), "LATENCY tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.DATA_TTL), "DATA_TTL tag is missing");
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.DATA_SIZE), "DATA_SIZE tag is missing");
}
public void verifyErrorTags(List<zipkin2.Span> spans) {
zipkin2.Span span = spans.get(0);
Map<String, String> tags = span.tags();
Assert.assertTrue(tags.containsKey(EVCacheTracingTags.ERROR), "ERROR tag is missing");
}
@Test
public void testEVCacheListenerOnComplete() {
tracingListener.onStart(mockEVCacheEvent);
tracingListener.onComplete(mockEVCacheEvent);
verifyCommonTags(reportedSpans);
}
@Test
public void testEVCacheListenerOnError() {
tracingListener.onStart(mockEVCacheEvent);
tracingListener.onError(mockEVCacheEvent, new RuntimeException("Unexpected Error"));
verifyCommonTags(reportedSpans);
verifyErrorTags(reportedSpans);
}
}
| 728 |
0 |
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix/evcache/EVCacheTracingTags.java
|
package com.netflix.evcache;
public class EVCacheTracingTags {
  public static final String CACHE_NAME_PREFIX = "evcache.cache_name_prefix";
  public static final String APP_NAME = "evcache.app_name";
  public static final String STATUS = "evcache.status";
  public static final String LATENCY = "evcache.latency";
  public static final String CALL = "evcache.call";
  public static final String SERVER_GROUPS = "evcache.server_groups";
  public static final String HASH_KEYS = "evcache.hash_keys";
  public static final String CANONICAL_KEYS = "evcache.canonical_keys";
  public static final String DATA_TTL = "evcache.data_ttl";
  public static final String DATA_SIZE = "evcache.data_size";
  public static final String ERROR = "evcache.error";
}
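// For reference, a successful GET traced by EVCacheTracingEventListener carries tags
// like the following (a sketch; the values are illustrative, taken from the unit test
// above, where duration is 1 ms and the cached payload is "dummyData"):
//   evcache.app_name=dummyAppName, evcache.call=GET, evcache.status=success,
//   evcache.server_groups=["dummyServerGroupName"], evcache.latency=1000,
//   evcache.data_ttl=0, evcache.data_size=9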
| 729 |
0 |
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-zipkin-tracing/src/main/java/com/netflix/evcache/EVCacheTracingEventListener.java
|
package com.netflix.evcache;
import brave.Span;
import brave.Tracer;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/** Adds tracing tags for EVCache calls. */
public class EVCacheTracingEventListener implements EVCacheEventListener {
  public static final String EVCACHE_SPAN_NAME = "evcache";
  private static final Logger logger = LoggerFactory.getLogger(EVCacheTracingEventListener.class);
  private static final String CLIENT_SPAN_ATTRIBUTE_KEY = "clientSpanAttributeKey";
private final Tracer tracer;
public EVCacheTracingEventListener(EVCacheClientPoolManager poolManager, Tracer tracer) {
poolManager.addEVCacheEventListener(this);
this.tracer = tracer;
}
@Override
public void onStart(EVCacheEvent e) {
try {
Span clientSpan =
this.tracer.nextSpan().kind(Span.Kind.CLIENT).name(EVCACHE_SPAN_NAME).start();
// Return if tracing has been disabled
if(clientSpan.isNoop()){
return;
}
String appName = e.getAppName();
this.safeTag(clientSpan, EVCacheTracingTags.APP_NAME, appName);
String cacheNamePrefix = e.getCacheName();
this.safeTag(clientSpan, EVCacheTracingTags.CACHE_NAME_PREFIX, cacheNamePrefix);
String call = e.getCall().name();
this.safeTag(clientSpan, EVCacheTracingTags.CALL, call);
      /**
       * Note - e.getClients() returns the list of clients associated with the EVCacheEvent.
       *
       * <p>A read operation has only one EVCacheClient, since reading from a single cache
       * instance is sufficient. Write operations have one client per cache instance, as each
       * client attempts to write to its own instance.
       */
String serverGroup;
List<String> serverGroups = new ArrayList<>();
for (EVCacheClient client : e.getClients()) {
serverGroup = client.getServerGroupName();
if (StringUtils.isNotBlank(serverGroup)) {
serverGroups.add("\"" + serverGroup + "\"");
}
}
clientSpan.tag(EVCacheTracingTags.SERVER_GROUPS, serverGroups.stream().collect(Collectors.joining(",", "[", "]")));
      /**
       * Note - the EVCache client creates a hash key if the given canonical key exceeds 255
       * characters.
       *
       * <p>There have been cases where the canonical key size exceeded a few megabytes. Since
       * the caching client hashes such canonical keys to optimize their storage on the cache
       * servers, it is safe to annotate the hash key instead of the canonical key in those
       * cases.
       */
String hashKey;
List<String> hashKeys = new ArrayList<>();
List<String> canonicalKeys = new ArrayList<>();
for (EVCacheKey keyObj : e.getEVCacheKeys()) {
hashKey = keyObj.getHashKey();
if (StringUtils.isNotBlank(hashKey)) {
hashKeys.add("\"" + hashKey + "\"");
} else {
canonicalKeys.add("\"" + keyObj.getCanonicalKey() + "\"");
}
}
if(hashKeys.size() > 0) {
this.safeTag(clientSpan, EVCacheTracingTags.HASH_KEYS,
hashKeys.stream().collect(Collectors.joining(",", "[", "]")));
}
if(canonicalKeys.size() > 0) {
this.safeTag(clientSpan, EVCacheTracingTags.CANONICAL_KEYS,
canonicalKeys.stream().collect(Collectors.joining(",", "[", "]")));
}
      /**
       * Note - Tracer.withSpanInScope(...) stores the current span in a thread-local.
       *
       * <p>Because EVCache write operations are asynchronous and quorum based, we avoid putting
       * clientSpan in scope via that thread-local. Instead, we store the clientSpan as an
       * attribute on the EVCacheEvent.
       */
e.setAttribute(CLIENT_SPAN_ATTRIBUTE_KEY, clientSpan);
} catch (Exception exception) {
logger.error("onStart exception", exception);
}
}
@Override
public void onComplete(EVCacheEvent e) {
try {
this.onFinishHelper(e, null);
} catch (Exception exception) {
logger.error("onComplete exception", exception);
}
}
@Override
public void onError(EVCacheEvent e, Throwable t) {
try {
this.onFinishHelper(e, t);
} catch (Exception exception) {
logger.error("onError exception", exception);
}
}
  /**
   * onThrottle is not a trace event; EVCache calls it to decide whether to throttle an
   * operation. We don't want to interfere, so we always return false.
   */
@Override
public boolean onThrottle(EVCacheEvent e) throws EVCacheException {
return false;
}
private void onFinishHelper(EVCacheEvent e, Throwable t) {
Object clientSpanObj = e.getAttribute(CLIENT_SPAN_ATTRIBUTE_KEY);
// Return if the previously saved Client Span is null
if (clientSpanObj == null) {
return;
}
Span clientSpan = (Span) clientSpanObj;
try {
if (t != null) {
this.safeTag(clientSpan, EVCacheTracingTags.ERROR, t.toString());
}
String status = e.getStatus();
this.safeTag(clientSpan, EVCacheTracingTags.STATUS, status);
long latency = this.getDurationInMicroseconds(e.getDurationInMillis());
clientSpan.tag(EVCacheTracingTags.LATENCY, String.valueOf(latency));
int ttl = e.getTTL();
clientSpan.tag(EVCacheTracingTags.DATA_TTL, String.valueOf(ttl));
CachedData cachedData = e.getCachedData();
if (cachedData != null) {
int cachedDataSize = cachedData.getData().length;
clientSpan.tag(EVCacheTracingTags.DATA_SIZE, String.valueOf(cachedDataSize));
}
} finally {
clientSpan.finish();
}
}
private void safeTag(Span span, String key, String value) {
if (StringUtils.isNotBlank(value)) {
span.tag(key, value);
}
}
  private long getDurationInMicroseconds(long durationInMillis) {
    // EVCacheEvent returns durationInMillis as -1 if the end time is not available.
    if (durationInMillis == -1) {
      return durationInMillis;
    } else {
      // The underlying EVCacheEvent only reports millisecond precision, so the
      // sub-millisecond detail is already lost; multiplying by 1000 suffices here.
      return durationInMillis * 1000;
    }
  }
}
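// A minimal wiring sketch (assumptions: the application builds its own Brave Tracing
// instance; "my-service" is an illustrative service name, not part of this library):
//
//   brave.Tracing tracing = brave.Tracing.newBuilder().localServiceName("my-service").build();
//   new EVCacheTracingEventListener(EVCacheClientPoolManager.getInstance(), tracing.tracer());
//
// The constructor registers the listener with the pool manager, so keeping a reference
// to the listener is optional.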
| 730 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/Base.java
|
package com.netflix.evcache.test;
import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Appender;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.Scheduler;
@SuppressWarnings("unused")
public abstract class Base {
static {
BasicConfigurator.configure();
final Layout LAYOUT = new PatternLayout("%d{ISO8601} %-5p [%c{1}:%M:%L] %m%n");
final Appender STDOUT = new ConsoleAppender(LAYOUT, ConsoleAppender.SYSTEM_OUT);
final org.apache.log4j.Logger ROOT_LOGGER = org.apache.log4j.Logger.getRootLogger();
ROOT_LOGGER.removeAllAppenders();
ROOT_LOGGER.setLevel(Level.WARN);
ROOT_LOGGER.addAppender(STDOUT);
}
private static final Logger log = LoggerFactory.getLogger(Base.class);
protected EVCache evCache = null;
protected EVCacheClientPoolManager manager = null;
protected Properties props = null;
protected Properties getProps() {
if(props != null) return props;
props = new Properties();
initProps();
return props;
}
protected void initProps() {
String hostname = System.getenv("EC2_HOSTNAME");
if(hostname == null) {
props.setProperty("eureka.datacenter", "datacenter");//change to ndc while running on desktop
props.setProperty("eureka.validateInstanceId","false");
props.setProperty("eureka.mt.connect_timeout","1");
props.setProperty("eureka.mt.read_timeout","1");
} else {
props.setProperty("eureka.datacenter", "cloud");
props.setProperty("eureka.validateInstanceId","true");
}
props.setProperty("eureka.environment", "test");
props.setProperty("eureka.region", "us-east-1");
props.setProperty("eureka.appid", "clatency");
props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheNodeLocator", "ERROR");
props.setProperty("log4j.logger.com.netflix.evcache.pool.EVCacheClientUtil", "ERROR");
}
@BeforeSuite
public void setupEnv() {
Properties props = getProps();
try {
for(Entry<Object, Object> prop : props.entrySet()) {
System.setProperty(prop.getKey().toString(), prop.getValue().toString());
}
} catch (Throwable e) {
e.printStackTrace();
log.error(e.getMessage(), e);
}
}
@AfterSuite
public void shutdown() {
manager.shutdown();
}
protected EVCache.Builder getNewBuilder() {
final EVCache.Builder evCacheBuilder = new EVCache.Builder();
if(log.isDebugEnabled()) log.debug("evCacheBuilder : " + evCacheBuilder);
return evCacheBuilder;
}
protected boolean append(int i, EVCache gCache) throws Exception {
String val = ";APP_" + i;
String key = "key_" + i;
Future<Boolean>[] status = gCache.append(key, val, 60 * 60);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("APPEND : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean appendOrAdd(int i, EVCache gCache) throws Exception {
return appendOrAdd(i, gCache, 60 * 60);
}
    protected boolean appendOrAdd(int i, EVCache gCache, int ttl) throws Exception {
        String val = "val_aa_" + i;
        String key = "key_" + i;
        EVCacheLatch latch = gCache.appendOrAdd(key, val, null, ttl, Policy.ALL_MINUS_1);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; Latch = " + latch);
        boolean status = latch.await(2000, TimeUnit.MILLISECONDS);
        if(log.isDebugEnabled()) log.debug("AppendOrAdd : key : " + key + "; success = " + status);
        return status;
    }
public boolean add(int i, EVCache gCache) throws Exception {
//String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
String val = "val_add_"+System.currentTimeMillis();
String key = "key_" + i;
boolean status = gCache.add(key, val, null, 60 * 60);
if(log.isDebugEnabled()) log.debug("ADD : key : " + key + "; success = " + status);
return status;
}
public boolean insert(int i, EVCache gCache) throws Exception {
//String val = "This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah val_"+i;
String val = "val_01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"+i;
String key = "key_" + i;
Future<Boolean>[] status = gCache.set(key, val, 60 * 60);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean replace(int i, EVCache gCache) throws Exception {
return replace(i, gCache, 60 * 60);
}
protected boolean replace(int i, EVCache gCache, int ttl) throws Exception {
String val = "val_replaced_" + i;
String key = "key_" + i;
EVCacheLatch status = gCache.replace(key, val, null, ttl, Policy.ALL);
boolean opStatus = status.await(1000, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled()) log.debug("REPLACE : key : " + key + "; success = " + opStatus + "; EVCacheLatch = " + status);
return status.getSuccessCount() > 0;
}
public boolean delete(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.delete(key);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean touch(int i, EVCache gCache) throws Exception {
return touch(i, gCache, 60 * 60);
}
protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.touch(key, ttl);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
@SuppressWarnings("deprecation")
protected boolean insertUsingLatch(int i, String app) throws Exception {
String val = "val_" + i;
String key = "key_" + i;
long start = System.currentTimeMillis();
final EVCacheClient[] clients = manager.getEVCacheClientPool(app).getEVCacheClientForWrite();
final EVCacheLatch latch = new EVCacheLatchImpl(EVCacheLatch.Policy.ALL, clients.length, app);
for (EVCacheClient client : clients) {
client.set(key, val, 60 * 60, latch);
}
boolean success = latch.await(1000, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled()) log.debug("SET LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec");
return success;
}
protected boolean deleteLatch(int i, String appName) throws Exception {
long start = System.currentTimeMillis();
String key = "key_" + i;
final EVCacheClient[] clients = manager.getEVCacheClientPool(appName).getEVCacheClientForWrite();
final EVCacheLatch latch = new EVCacheLatchImpl(Policy.ALL, clients.length, appName);
for (EVCacheClient client : clients) {
client.delete(key, latch);
}
latch.await(1000, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled()) log.debug("DELETE LATCH : key : " + key + "; Finished in " + (System.currentTimeMillis() - start) + " msec" + "; Latch : " + latch);
return true;
}
public String get(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
return value;
}
    public String completableFutureGet(int i, EVCache gCache) throws Exception {
        String key = "key_" + i;
        // Fire the async get and print the value when it arrives. Note that this helper
        // deliberately returns null; callers should not assert on the returned value.
        gCache.<String>getAsync(key).handle((data, ex) -> {
            System.out.println(data);
            return data;
        });
        /*
        String val = value.get();
        if(log.isDebugEnabled()) log.debug("get : key : " + key
                + " completableFuture value = " + value
                + " actual value = " + val);
        return val;
        */
        return null;
    }
public String getWithPolicy(int i, EVCache gCache, Policy policy) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key, null, policy);
if(log.isDebugEnabled()) log.debug("get with Policy : key : " + key + " val = " + value);
return value;
}
public String getAndTouch(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>getAndTouch(key, 60 * 60);
if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
return value;
}
public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
final Map<String, String> value = gCache.<String>getBulk(keys);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
public Map<String, String> getAsyncBulk(String keys[], EVCache gCache) throws Exception {
final CompletableFuture<Map<String, String>> value = gCache.<String>getAsyncBulk(keys);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value.get();
}
public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
public String getObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key, scheduler).toBlocking().value();
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
return value;
}
public String getAndTouchObservable(int i, EVCache gCache, Scheduler scheduler) throws Exception {
String key = "key_" + i;
String value = gCache.<String>getAndTouch(key, 60 * 60, scheduler).toBlocking().value();
if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
return value;
}
class RemoteCaller implements Runnable {
EVCache gCache;
public RemoteCaller(EVCache c) {
this.gCache = c;
}
        public void run() {
            try {
                for(int i = 0; i < 100; i++) {
                    insert(i, gCache);
                    get(i, gCache);
                    delete(i, gCache);
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
}
}
| 731 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/SimpleEVCacheTest.java
|
package com.netflix.evcache.test;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.EVCacheSerializingTranscoder;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.SerializingTranscoder;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.schedulers.Schedulers;
import static org.testng.Assert.*;
@SuppressWarnings({"unused","deprecation"})
public class SimpleEVCacheTest extends Base {
private static final Logger log = LogManager.getLogger(SimpleEVCacheTest.class);
private static final String APP_NAME = "EVCACHE_TEST";
private static final String ALIAS_APP_NAME = "EVCACHE";
private ThreadPoolExecutor pool = null;
public static void main(String args[]) {
SimpleEVCacheTest test = new SimpleEVCacheTest();
test.setProps();
test.setupEnv();
test.testAll();
}
@BeforeSuite
public void setProps() {
BasicConfigurator.resetConfiguration();
BasicConfigurator.configure(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} [%t] %p %c %x - %m%n")));
Logger.getRootLogger().setLevel(Level.INFO);
Logger.getLogger(SimpleEVCacheTest.class).setLevel(Level.DEBUG);
Logger.getLogger(Base.class).setLevel(Level.DEBUG);
Logger.getLogger(EVCacheImpl.class).setLevel(Level.DEBUG);
Logger.getLogger(EVCacheClient.class).setLevel(Level.DEBUG);
Logger.getLogger(EVCacheClientPool.class).setLevel(Level.DEBUG);
final Properties props = getProps();
props.setProperty(APP_NAME + ".EVCacheClientPool.zoneAffinity", "false");
props.setProperty(APP_NAME + ".use.simple.node.list.provider", "true");
props.setProperty(APP_NAME + ".EVCacheClientPool.readTimeout", "1000");
props.setProperty(APP_NAME + ".EVCacheClientPool.bulkReadTimeout", "1000");
props.setProperty(APP_NAME + ".max.read.queue.length", "100");
props.setProperty(APP_NAME + ".operation.timeout", "10000");
props.setProperty(APP_NAME + ".throw.exception", "false");
        // Setting properties here to test that aliases can be disabled. If a test case
        // ever requires aliases, these properties should be set under a special condition.
props.setProperty("EVCacheClientPoolManager." + APP_NAME + ".alias", ALIAS_APP_NAME);
props.setProperty("EVCacheClientPoolManager." + APP_NAME + ".ignoreAlias", "true");
// End alias properties
int maxThreads = 2;
final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(100000);
pool = new ThreadPoolExecutor(maxThreads * 4, maxThreads * 4, 30, TimeUnit.SECONDS, queue);
pool.prestartAllCoreThreads();
}
public SimpleEVCacheTest() {
}
@BeforeSuite(dependsOnMethods = { "setProps" })
public void setupClusterDetails() {
manager = EVCacheClientPoolManager.getInstance();
}
@Test public void testDisablingAlias()
{
        // Ensure the alias is ignored: we should see "EVCACHE_TEST" rather than the alias
        // "EVCACHE", per the ignoreAlias property set above.
EVCacheClientPool pool = EVCacheClientPoolManager.getInstance().getEVCacheClientPool(APP_NAME);
assertEquals(pool.getAppName(), APP_NAME);
}
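    // For reference, a sketch of the alias-enabled case (an assumption based on the
    // properties above, not an active test): with the ignoreAlias property unset or
    // false, the pool would resolve APP_NAME to its alias, and getAppName() would
    // return ALIAS_APP_NAME ("EVCACHE") instead.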
public void testAll() {
try {
EVCacheClientPoolManager.getInstance().initEVCache(APP_NAME);
testDisablingAlias();
testEVCache();
            int i = 1;
            // Manual harness: loops forever exercising the enabled operations; stop with Ctrl-C.
            boolean flag = true;
            while (flag) {
try {
// testAdd();
testInsert();
// testAppend();
testGet();
testGetWithPolicy();
testEVCacheTranscoder();
// testGetObservable();
// testGetAndTouch();
// testBulk();
// testBulkAndTouch();
// testAppendOrAdd();
// testCompletableFutureGet();
// testCompletableFutureBulk();
// if(i++ % 5 == 0) testDelete();
//Thread.sleep(3000);
} catch (Exception e) {
log.error(e);
}
//Thread.sleep(3000);
}
} catch (Exception e) {
log.error(e);
}
}
public void testGetForKey(String key) throws Exception {
String value = evCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
}
@BeforeSuite
public void setupEnv() {
super.setupEnv();
}
protected EVCache evCache = null;
@Test
public void testEVCache() {
this.evCache = (new EVCache.Builder()).setAppName("EVCACHE_TEST").setCachePrefix(null).enableRetry().build();
assertNotNull(evCache);
}
@Test(dependsOnMethods = { "testEVCache" })
public void testAdd() throws Exception {
for (int i = 0; i < 10; i++) {
add(i, evCache);
}
}
@Test(dependsOnMethods = { "testAdd" })
public void testInsert() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insert(i, evCache), "SET : Following Index failed - " + i + " for evcache - " + evCache);
//insert(i, evCache);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testAppend() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(append(i, evCache), "APPEND : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testAppend" })
public void testGet() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = get(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testCompletableFutureGet() throws Exception {
for (int i = 0; i < 1000; i++) {
final String val = completableFutureGet(i, evCache);
//assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetWithPolicy() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getWithPolicy(i, evCache, Policy.QUORUM);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetWithPolicy" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testCompletableFutureBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getAsyncBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[10];
for (int i = 0; i < 10; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 60 * 60);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
public void testAppendOrAdd() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(appendOrAdd(i, evCache));
}
}
@Test(dependsOnMethods = { "testBulkAndTouch" })
public void testReplace() throws Exception {
for (int i = 0; i < 10; i++) {
replace(i, evCache);
}
}
@Test(dependsOnMethods = { "testReplace" })
public void testDelete() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(delete(i, evCache), "DELETE : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testDelete" })
public void testInsertAsync() throws Exception {
for (int i = 0; i < 10; i++) {
boolean flag = insertAsync(i, evCache);
assertTrue(flag, "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testInsertAsync" })
public void testTouch() throws Exception {
for (int i = 0; i < 10; i++) {
touch(i, evCache, 1000);
String val = get(i, evCache);
assertTrue(val != null);
}
}
public boolean insertAsync(int i, EVCache gCache) throws Exception {
// String val = "This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah.This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah .This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah
// blah blah blah blah blah blah blah blah blah blah blah val_"
// + i;
String val = "val_" + i;
String key = "key_" + i;
Future<Boolean>[] statuses = gCache.set(key, val, 24 * 60 * 60);
for(Future<Boolean> status : statuses) {
assertTrue(status.get(), "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
}
pool.submit(new StatusChecker(key, statuses));
return true;
}
@Test(dependsOnMethods = { "testTouch" })
public void testInsertLatch() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insertUsingLatch(i, "EVCACHE"));
}
}
@Test(dependsOnMethods = { "testInsertLatch" })
public void testDeleteLatch() throws Exception {
for (int i = 0; i < 10; i++) {
deleteLatch(i, "EVCACHE");
}
}
public void testGetObservable() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getObservable(i, evCache, Schedulers.computation());
// Observable<String> obs = evCache.<String> observeGet(key);
// obs.doOnNext(new OnNextHandler(key)).doOnError(new OnErrorHandler(key)).subscribe();
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testEVCacheTranscoder() throws Exception {
EVCacheSerializingTranscoder evcacheTranscoder = new EVCacheSerializingTranscoder();
SerializingTranscoder serializingTranscoder = new SerializingTranscoder();
// long string to trigger compression
String val = "val_01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789";
CachedData evCachedData = evcacheTranscoder.encode(val);
CachedData serializingCachedData = serializingTranscoder.encode(val);
        assertTrue(Arrays.equals(evCachedData.getData(), serializingCachedData.getData()), "Encoded data should match SerializingTranscoder output: " + evCachedData.toString());
if(log.isDebugEnabled()) log.debug("EVCacheTranscoder result equal to SerializingTranscoder: " + Arrays.equals(evCachedData.getData(), serializingCachedData.getData()));
}
class StatusChecker implements Runnable {
Future<Boolean>[] status;
String key;
public StatusChecker(String key, Future<Boolean>[] status) {
this.status = status;
this.key = key;
}
public void run() {
try {
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get());
}
} catch (Exception e) {
log.error(e);
}
}
}
}
| 732 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/MockEVCacheTest.java
|
package com.netflix.evcache.test;
import static org.mockito.Matchers.anyCollection;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Future;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheException;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import rx.functions.Action1;
public class MockEVCacheTest {
protected EVCache evCache = null;
private static final Logger log = LoggerFactory.getLogger(MockEVCacheTest.class);
private int loops = 10;
public MockEVCacheTest() {
}
@Test
public void testEVCache() {
this.evCache = new DummyEVCacheImpl().getDummyCache();
assertNotNull(evCache);
}
public boolean insert(int i, EVCache gCache) throws Exception {
String val = "val_"+i;
String key = "key_" + i;
Future<Boolean>[] status = gCache.set(key, val, 24 * 60 * 60);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
public boolean delete(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.delete(key);
for(Future<Boolean> s : status) {
if(log.isDebugEnabled()) log.debug("DELETE : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if(s.get() == Boolean.FALSE) return false;
}
return true;
}
protected boolean touch(int i, EVCache gCache) throws Exception {
return touch(i, gCache, 24 * 60 * 60);
}
protected boolean touch(int i, EVCache gCache, int ttl) throws Exception {
String key = "key_" + i;
Future<Boolean>[] status = gCache.touch(key, ttl);
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("TOUCH : key : " + key + "; success = " + s.get() + "; Future = " + s.toString());
if (s.get() == Boolean.FALSE) return false;
}
return true;
}
public String get(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
return value;
}
public String getAndTouch(int i, EVCache gCache) throws Exception {
String key = "key_" + i;
String value = gCache.<String>getAndTouch(key, 24 * 60 * 60);
if(log.isDebugEnabled()) log.debug("getAndTouch : key : " + key + " val = " + value);
return value;
}
public Map<String, String> getBulk(String keys[], EVCache gCache) throws Exception {
final Map<String, String> value = gCache.<String>getBulk(keys);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
public Map<String, String> getBulkAndTouch(String keys[], EVCache gCache, int ttl) throws Exception {
final Map<String, String> value = gCache.<String>getBulkAndTouch(Arrays.asList(keys), null, ttl);
if(log.isDebugEnabled()) log.debug("getBulk : keys : " + Arrays.toString(keys) + "; values = " + value);
return value;
}
@Test(dependsOnMethods = { "testEVCache" })
public void testInsert() throws Exception {
for (int i = 0; i < loops; i++) {
assertTrue(insert(i, evCache));
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testGet() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = get(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < loops; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[loops];
for (int i = 0; i < loops; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertNotNull(vals);
for (int i = 0; i < keys.length; i++) {
String key = keys[i];
String val = vals.get(key);
if (log.isDebugEnabled()) log.debug("key " + key + " returned val " + val);
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[loops];
for (int i = 0; i < loops; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 24 * 60 * 60);
assertNotNull(vals);
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
if (val == null) {
if (log.isDebugEnabled()) log.debug("key " + key + " returned null");
} else {
assertTrue(val.equals("val_" + i));
}
}
}
@Test(dependsOnMethods = { "testBulkAndTouch" })
public void testDelete() throws Exception {
for (int i = 0; i < loops; i++) {
delete(i, evCache);
}
}
public void onComplete(EVCacheOperationFuture<String> future) throws Exception {
if (log.isDebugEnabled()) log.debug("getl : key : " + future.getKey() + ", val = " + future.get());
}
static class OnErrorHandler implements Action1<Throwable> {
private final String key;
public OnErrorHandler(String key) {
this.key = key;
}
@Override
public void call(Throwable t1) {
if (log.isDebugEnabled()) log.debug("Could not get value for key: " + key + "; Exception is ", t1);
}
}
static class OnNextHandler implements Action1<String> {
private final String key;
public OnNextHandler(String key) {
this.key = key;
}
@Override
public void call(String val) {
if (log.isDebugEnabled()) log.debug("Observable : key " + key + "; val = " + val);
}
}
    /**
     * Dummy cache used for debugging purposes (a simple way to disable caching).
     */
private static class DummyEVCacheImpl {
private final EVCache cache;
@SuppressWarnings("unchecked")
public DummyEVCacheImpl() {
cache = mock(EVCache.class);
try {
when(cache.set(anyString(), anyObject(), anyInt())).thenReturn(new Future[0]);
when(cache.get(anyString())).thenReturn("");
when(cache.getAndTouch(anyString(), anyInt())).thenReturn("");
when(cache.getBulk(anyCollection())).thenReturn(Collections.emptyMap());
when(cache.delete(anyString())).thenReturn(new Future[0]);
} catch (EVCacheException e) {
log.error("Unable to create mock EVCache", e);
}
}
public EVCache getDummyCache() {
return cache;
}
}
}
| 733 |
0 |
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/test/java/com/netflix/evcache/test/SimpleEurekaEVCacheTest.java
|
package com.netflix.evcache.test;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import rx.schedulers.Schedulers;
@SuppressWarnings({"unused","deprecation"})
public class SimpleEurekaEVCacheTest extends Base {
private static final Logger log = LoggerFactory.getLogger(SimpleEurekaEVCacheTest.class);
private ThreadPoolExecutor pool = null;
public static void main(String args[]) {
SimpleEurekaEVCacheTest test = new SimpleEurekaEVCacheTest();
test.setProps();
test.testAll();
}
@BeforeSuite
public void setProps() {
org.apache.log4j.Logger.getLogger(SimpleEurekaEVCacheTest.class).setLevel(Level.DEBUG);
org.apache.log4j.Logger.getLogger(Base.class).setLevel(Level.DEBUG);
org.apache.log4j.Logger.getLogger(EVCacheImpl.class).setLevel(Level.ERROR);
org.apache.log4j.Logger.getLogger(EVCacheClient.class).setLevel(Level.ERROR);
org.apache.log4j.Logger.getLogger(EVCacheClientPool.class).setLevel(Level.ERROR);
System.setProperty("evcache.use.simple.node.list.provider", "true");
System.setProperty("EVCACHE_AB.EVCacheClientPool.readTimeout", "100000");
System.setProperty("EVCACHE_AB.EVCacheClientPool.bulkReadTimeout", "10000");
System.setProperty("EVCACHE_AB.max.read.queue.length", "100");
System.setProperty("EVCACHE_AB.operation.timeout", "10000");
System.setProperty("EVCACHE_AB.throw.exception", "false");
System.setProperty("EVCACHE_AB.chunk.data", "false");
System.setProperty("NETFLIX_ENVIRONMENT", "test");
System.setProperty("EC2_REGION", "us-east-1");
System.setProperty("evcache.thread.daemon", "true");
int maxThreads = 2;
final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(100000);
pool = new ThreadPoolExecutor(maxThreads * 4, maxThreads * 4, 30, TimeUnit.SECONDS, queue);
pool.prestartAllCoreThreads();
}
public SimpleEurekaEVCacheTest() {
}
@BeforeSuite(dependsOnMethods = { "setProps" })
public void setupClusterDetails() {
manager = EVCacheClientPoolManager.getInstance();
}
public void testAll() {
try {
setupClusterDetails();
EVCacheClientPoolManager.getInstance().initEVCache("EVCACHE_AB");
testEVCache();
int i = 1;
boolean flag = true;
while (flag) {
try {
testAdd();
testInsert();
testInsertAsync();
//// testAppend();
testGet();
testGetObservable();
testGetAndTouch();
testBulk();
testBulkAndTouch();
testAppendOrAdd();
testCompletableFutureGet();
testCompletableFutureBulk();
if(i++ % 5 == 0) testDelete();
Thread.sleep(1000);
if (i > 100) break;
} catch (Exception e) {
log.error("Exception", e);
}
//Thread.sleep(3000);
}
Thread.sleep(100);
} catch (Exception e) {
log.error("Exception", e);
}
shutdown();
}
public void testGetForKey(String key) throws Exception {
String value = evCache.<String>get(key);
if(log.isDebugEnabled()) log.debug("get : key : " + key + " val = " + value);
}
@BeforeSuite
public void setupEnv() {
}
protected EVCache evCache = null;
@Test
public void testEVCache() {
this.evCache = (new EVCache.Builder()).setAppName("EVCACHE_AB").setCachePrefix(null).enableRetry().build();
assertNotNull(evCache);
}
@Test(dependsOnMethods = { "testEVCache" })
public void testAdd() throws Exception {
for (int i = 0; i < 10; i++) {
add(i, evCache);
}
}
@Test(dependsOnMethods = { "testAdd" })
public void testInsert() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insert(i, evCache), "SET : Following Index failed - " + i + " for evcache - " + evCache);
insert(i, evCache);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testAppend() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(append(i, evCache), "APPEND : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testAppend" })
public void testGet() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = get(i, evCache);
// assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testInsert" })
public void testCompletableFutureGet() throws Exception {
        for (int i = 0; i < 1000; i++) {
            final String val = completableFutureGet(i, evCache);
            // completableFutureGet intentionally returns null (see Base), so there is
            // no value to assert on here.
        }
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testCompletableFutureBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getAsyncBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testGet" })
public void testGetAndTouch() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getAndTouch(i, evCache);
assertNotNull(val);
}
}
@Test(dependsOnMethods = { "testGetAndTouch" })
public void testBulk() throws Exception {
final String[] keys = new String[12];
for (int i = 0; i < keys.length; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulk(keys, evCache);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
@Test(dependsOnMethods = { "testBulk" })
public void testBulkAndTouch() throws Exception {
final String[] keys = new String[10];
for (int i = 0; i < 10; i++) {
keys[i] = "key_" + i;
}
Map<String, String> vals = getBulkAndTouch(keys, evCache, 60 * 60);
assertTrue(!vals.isEmpty());
for (int i = 0; i < vals.size(); i++) {
String key = "key_" + i;
String val = vals.get(key);
}
}
public void testAppendOrAdd() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(appendOrAdd(i, evCache));
}
}
@Test(dependsOnMethods = { "testBulkAndTouch" })
public void testReplace() throws Exception {
for (int i = 0; i < 10; i++) {
replace(i, evCache);
}
}
@Test(dependsOnMethods = { "testReplace" })
public void testDelete() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(delete(i, evCache), "DELETE : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testDelete" })
public void testInsertAsync() throws Exception {
for (int i = 0; i < 10; i++) {
boolean flag = insertAsync(i, evCache);
if(log.isDebugEnabled()) log.debug("SET : async : i: " + i + " flag = " + flag);
assertTrue(flag, "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
}
}
@Test(dependsOnMethods = { "testInsertAsync" })
public void testTouch() throws Exception {
for (int i = 0; i < 10; i++) {
touch(i, evCache, 1000);
String val = get(i, evCache);
assertTrue(val != null);
}
}
public boolean insertAsync(int i, EVCache gCache) throws Exception {
// String val = "This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah.This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah .This is a very long value that should work well since we are going to use compression on it. This is a very long value that should work well since we are going to use compression on it. blah blah blah blah blah blah blah
// blah blah blah blah blah blah blah blah blah blah blah val_"
// + i;
String val = "val_" + i;
String key = "key_" + i;
Future<Boolean>[] statuses = gCache.set(key, val, 24 * 60 * 60);
// for(Future<Boolean> status : statuses) {
// assertTrue(status.get(), "SET ASYNC : Following Index failed - " + i + " for evcache - " + evCache);
// }
// pool.submit(new StatusChecker(key, statuses));
return true;
}
@Test(dependsOnMethods = { "testTouch" })
public void testInsertLatch() throws Exception {
for (int i = 0; i < 10; i++) {
assertTrue(insertUsingLatch(i, "EVCACHE"));
}
}
@Test(dependsOnMethods = { "testInsertLatch" })
public void testDeleteLatch() throws Exception {
for (int i = 0; i < 10; i++) {
deleteLatch(i, "EVCACHE");
}
}
public void testGetObservable() throws Exception {
for (int i = 0; i < 10; i++) {
final String val = getObservable(i, evCache, Schedulers.computation());
// Observable<String> obs = evCache.<String> observeGet(key);
// obs.doOnNext(new OnNextHandler(key)).doOnError(new OnErrorHandler(key)).subscribe();
}
}
class StatusChecker implements Runnable {
Future<Boolean>[] status;
String key;
public StatusChecker(String key, Future<Boolean>[] status) {
this.status = status;
this.key = key;
}
public void run() {
try {
for (Future<Boolean> s : status) {
if (log.isDebugEnabled()) log.debug("SET : key : " + key + "; success = " + s.get());
}
} catch (Exception e) {
log.error("Exception", e);
}
}
}
@AfterSuite
public void shutdown() {
pool.shutdown();
super.shutdown();
}
}
| 734 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheConnection.java
|
package net.spy.memcached;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedSelectorException;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.spy.memcached.ops.Operation;
public class EVCacheConnection extends MemcachedConnection {
private static final Logger log = LoggerFactory.getLogger(EVCacheConnection.class);
public EVCacheConnection(String name, int bufSize, ConnectionFactory f,
List<InetSocketAddress> a, Collection<ConnectionObserver> obs,
FailureMode fm, OperationFactory opfactory) throws IOException {
super(bufSize, f, a, obs, fm, opfactory);
setName(name);
}
@Override
public void shutdown() throws IOException {
try {
super.shutdown();
for (MemcachedNode qa : getLocator().getAll()) {
if (qa instanceof EVCacheNode) {
((EVCacheNode) qa).shutdown();
}
}
} finally {
if(running) {
running = false;
if(log.isWarnEnabled()) log.warn("Forceful shutdown by interrupting the thread.", new Exception());
interrupt();
}
}
}
    @Override
    public void run() {
        while (running) {
            try {
                handleIO();
            } catch (IOException e) {
                if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
            } catch (CancelledKeyException e) {
                if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
            } catch (ClosedSelectorException e) {
                if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
            } catch (IllegalStateException e) {
                if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
            } catch (ConcurrentModificationException e) {
                if (log.isDebugEnabled()) log.debug(e.getMessage(), e);
            } catch (Throwable e) {
                // Catch everything else so the I/O thread doesn't die.
                log.error("SEVERE EVCACHE ISSUE.", e);
            }
        }
        if (log.isDebugEnabled()) log.debug(toString() + " : Shutdown");
    }
public String toString() {
return super.toString();
}
    @Override
    protected void addOperation(final MemcachedNode node, final Operation o) {
        super.addOperation(node, o);
        ((EVCacheNode) node).incrOps();
    }
@Override
public void addOperations(Map<MemcachedNode, Operation> ops) {
super.addOperations(ops);
for (MemcachedNode node : ops.keySet()) {
((EVCacheNode) node).incrOps();
}
}
@Override
public void enqueueOperation(final String key, final Operation o) {
checkState();
addOperation(key, o);
}
@Override
public CountDownLatch broadcastOperation(BroadcastOpFactory of, Collection<MemcachedNode> nodes) {
for (MemcachedNode node : nodes) {
((EVCacheNode) node).incrOps();
}
return super.broadcastOperation(of, nodes);
}
}
| 735 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheNodeMBean.java
|
package net.spy.memcached;
public interface EVCacheNodeMBean extends EVCacheNode {
}
| 736 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheMemcachedClient.java
|
package net.spy.memcached;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheGetOperationListener;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.operation.EVCacheAsciiOperationFactory;
import com.netflix.evcache.operation.EVCacheBulkGetFuture;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.ipc.IpcStatus;
import net.spy.memcached.internal.GetFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.ops.ConcatenationType;
import net.spy.memcached.ops.DeleteOperation;
import net.spy.memcached.ops.GetAndTouchOperation;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatsOperation;
import net.spy.memcached.ops.StatusCode;
import net.spy.memcached.ops.StoreOperation;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.protocol.binary.BinaryOperationFactory;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.StringUtils;
import net.spy.memcached.protocol.ascii.ExecCmdOperation;
import net.spy.memcached.protocol.ascii.MetaDebugOperation;
import net.spy.memcached.protocol.ascii.MetaGetOperation;
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS",
"SIC_INNER_SHOULD_BE_STATIC_ANON" })
public class EVCacheMemcachedClient extends MemcachedClient {
private static final Logger log = LoggerFactory.getLogger(EVCacheMemcachedClient.class);
private final String appName;
private final Property<Integer> readTimeout;
private final EVCacheClient client;
private final Map<String, Timer> timerMap = new ConcurrentHashMap<String, Timer>();
private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>();
private Property<Long> mutateOperationTimeout;
private final ConnectionFactory connectionFactory;
private final Property<Integer> maxReadDuration, maxWriteDuration;
private final Property<Boolean> enableDebugLogsOnWrongKey;
public EVCacheMemcachedClient(ConnectionFactory cf, List<InetSocketAddress> addrs,
Property<Integer> readTimeout, EVCacheClient client) throws IOException {
super(cf, addrs);
this.connectionFactory = cf;
this.readTimeout = readTimeout;
this.client = client;
this.appName = client.getAppName();
this.maxWriteDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50);
this.maxReadDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".max.read.duration.metric", Integer.class).orElseGet("evcache.max.read.duration.metric").orElse(20);
this.enableDebugLogsOnWrongKey = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".enable.debug.logs.on.wrongkey", Boolean.class).orElse(false);
}
public NodeLocator getNodeLocator() {
return this.mconn.getLocator();
}
public MemcachedNode getEVCacheNode(String key) {
return this.mconn.getLocator().getPrimary(key);
}
public <T> GetFuture<T> asyncGet(final String key, final Transcoder<T> tc) {
throw new UnsupportedOperationException("asyncGet");
}
// Returns 'true' if keys don't match and logs & reports the error.
// Returns 'false' if keys match.
// TODO: Consider removing this code once we've fixed the Wrong key bug(s)
private boolean isWrongKeyReturned(String original_key, String returned_key) {
if (!original_key.equals(returned_key)) {
// If the keys don't match, log the error along with the owning host's information and a stack trace.
final String original_host = getHostNameByKey(original_key);
final String returned_host = getHostNameByKey(returned_key);
log.error("Wrong key returned. Key - " + original_key + " (Host: " + original_host + ") ; Returned Key "
+ returned_key + " (Host: " + returned_host + ")", new Exception());
client.reportWrongKeyReturned(original_host);
// If we are configured to dynamically switch log levels to DEBUG on a wrong key error, do so here.
if (enableDebugLogsOnWrongKey.get()) {
System.setProperty("log4j.logger.net.spy.memcached", "DEBUG");
}
return true;
}
return false;
}
public <T> EVCacheOperationFuture<T> asyncGet(final String key, final Transcoder<T> tc, EVCacheGetOperationListener<T> listener) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<T> rv = new EVCacheOperationFuture<T>(key, latch, new AtomicReference<T>(null), readTimeout.get().intValue(), executorService, client);
final Operation op = opFact.get(key, new GetOperation.Callback() {
private Future<T> val = null;
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("Getting Key : " + key + "; Status : " + status.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "")
+ "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
try {
if (val != null) {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val : " + val.get());
rv.set(val.get(), status);
} else {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val is null");
rv.set(null, status);
}
} catch (Exception e) {
log.error(e.getMessage(), e);
rv.set(null, status);
}
}
@SuppressWarnings("unchecked")
public void gotData(String k, int flags, byte[] data) {
if (isWrongKeyReturned(key, k)) return;
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Read data : key " + key + "; flags : " + flags + "; data : " + data);
if (data != null) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + key + "; val size : " + data.length);
getDataSizeDistributionSummary(EVCacheMetricsFactory.GET_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
if (tc == null) {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
final Transcoder<T> t = (Transcoder<T>) getTranscoder();
val = tcService.decode(t, new CachedData(flags, data, t.getMaxSize()));
}
} else {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
val = tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize()));
}
}
} else {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + key + "; val is null" );
}
}
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.GET_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (val != null ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), host, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (listener != null) rv.addListener(listener);
mconn.enqueueOperation(key, op);
return rv;
}
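// Note (editorial): the payload is decoded through the shared TranscoderService
// future and joined inside receivedStatus(), so the operation latch only counts
// down (in complete()) after decoding has finished or failed.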
public <T> EVCacheBulkGetFuture<T> asyncGetBulk(Collection<String> keys,
final Transcoder<T> tc,
EVCacheGetOperationListener<T> listener) {
final Map<String, Future<T>> m = new ConcurrentHashMap<String, Future<T>>();
// Break the gets down into groups by key
final Map<MemcachedNode, Collection<String>> chunks = new HashMap<MemcachedNode, Collection<String>>();
final NodeLocator locator = mconn.getLocator();
//Populate Node and key Map
for (String key : keys) {
StringUtils.validateKey(key, opFact instanceof BinaryOperationFactory);
final MemcachedNode primaryNode = locator.getPrimary(key);
if (primaryNode.isActive()) {
Collection<String> ks = chunks.computeIfAbsent(primaryNode, k -> new ArrayList<>());
ks.add(key);
}
}
final AtomicInteger pendingChunks = new AtomicInteger(chunks.size());
int initialLatchCount = chunks.isEmpty() ? 0 : 1;
final CountDownLatch latch = new CountDownLatch(initialLatchCount);
final Collection<Operation> ops = new ArrayList<Operation>(chunks.size());
final EVCacheBulkGetFuture<T> rv = new EVCacheBulkGetFuture<T>(m, ops, latch, executorService, client);
GetOperation.Callback cb = new GetOperation.Callback() {
@Override
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("GetBulk Keys : " + keys + "; Status : " + status.getStatusCode().name() + "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
rv.setStatus(status);
}
@Override
public void gotData(String k, int flags, byte[] data) {
if (data != null) {
getDataSizeDistributionSummary(EVCacheMetricsFactory.BULK_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
}
m.put(k, tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize())));
}
@Override
public void complete() {
if (pendingChunks.decrementAndGet() <= 0) {
latch.countDown();
getTimer(EVCacheMetricsFactory.BULK_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (m.size() == keys.size() ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), null, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
}
};
// Now that we know how many servers it breaks down into, and the latch
// is all set up, convert all of these strings collections to operations
final Map<MemcachedNode, Operation> mops = new HashMap<MemcachedNode, Operation>();
for (Map.Entry<MemcachedNode, Collection<String>> me : chunks.entrySet()) {
Operation op = opFact.get(me.getValue(), cb);
mops.put(me.getKey(), op);
ops.add(op);
}
assert mops.size() == chunks.size();
mconn.checkState();
mconn.addOperations(mops);
return rv;
}
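// Note (editorial): the bulk path above only chunks keys whose primary node is
// active, so keys owned by a down node are silently absent from the result map
// rather than failing the whole call; callers must treat missing keys as misses.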
public <T> EVCacheOperationFuture<CASValue<T>> asyncGetAndTouch(final String key, final int exp, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<CASValue<T>> rv = new EVCacheOperationFuture<CASValue<T>>(key, latch, new AtomicReference<CASValue<T>>(null), operationTimeout, executorService, client);
Operation op = opFact.getAndTouch(key, exp, new GetAndTouchOperation.Callback() {
private CASValue<T> val = null;
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("GetAndTouch Key : " + key + "; Status : " + status.getStatusCode().name()
+ (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "")
+ "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
rv.set(val, status);
}
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.GET_AND_TOUCH_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (val != null ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), host, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
public void gotData(String k, int flags, long cas, byte[] data) {
if (isWrongKeyReturned(key, k)) return;
if (data != null) getDataSizeDistributionSummary(EVCacheMetricsFactory.GET_AND_TOUCH_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
val = new CASValue<T>(cas, tc.decode(new CachedData(flags, data, tc.getMaxSize())));
}
});
rv.setOperation(op);
mconn.enqueueOperation(key, op);
return rv;
}
public <T> OperationFuture<Boolean> set(String key, int exp, T o, final Transcoder<T> tc) {
return asyncStore(StoreType.set, key, exp, o, tc, null);
}
public OperationFuture<Boolean> set(String key, int exp, Object o) {
return asyncStore(StoreType.set, key, exp, o, transcoder, null);
}
@SuppressWarnings("unchecked")
public <T> OperationFuture<Boolean> set(String key, int exp, T o, final Transcoder<T> tc, EVCacheLatch latch) {
Transcoder<T> t = (Transcoder<T>) ((tc == null) ? transcoder : tc);
return asyncStore(StoreType.set, key, exp, o, t, latch);
}
@SuppressWarnings("unchecked")
public <T> OperationFuture<Boolean> replace(String key, int exp, T o, final Transcoder<T> tc, EVCacheLatch latch) {
Transcoder<T> t = (Transcoder<T>) ((tc == null) ? transcoder : tc);
return asyncStore(StoreType.replace, key, exp, o, t, latch);
}
public <T> OperationFuture<Boolean> add(String key, int exp, T o, Transcoder<T> tc) {
return asyncStore(StoreType.add, key, exp, o, tc, null);
}
public OperationFuture<Boolean> delete(String key, EVCacheLatch evcacheLatch) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final DeleteOperation op = opFact.delete(key, new DeleteOperation.Callback() {
@Override
public void receivedStatus(OperationStatus status) {
rv.set(Boolean.TRUE, status);
}
@Override
public void gotData(long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.DELETE_OPERATION, EVCacheMetricsFactory.WRITE, rv.getStatus(), null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
mconn.enqueueOperation(key, op);
return rv;
}
public <T> OperationFuture<Boolean> touch(final String key, final int exp, EVCacheLatch evcacheLatch) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final Operation op = opFact.touch(key, exp, new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
rv.set(status.isSuccess(), status);
}
@Override
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.TOUCH_OPERATION, EVCacheMetricsFactory.WRITE, rv.getStatus(), null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
mconn.enqueueOperation(key, op);
return rv;
}
public <T> OperationFuture<Boolean> asyncAppendOrAdd(final String key, int exp, CachedData co, EVCacheLatch evcacheLatch) {
final CountDownLatch latch = new CountDownLatch(1);
if(co != null && co.getData() != null) getDataSizeDistributionSummary(EVCacheMetricsFactory.AOA_OPERATION, EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.IPC_SIZE_OUTBOUND).record(co.getData().length);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final Operation opAppend = opFact.cat(ConcatenationType.append, 0, key, co.getData(), new OperationCallback() {
boolean appendSuccess = false;
@Override
public void receivedStatus(OperationStatus val) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("AddOrAppend Key (Append Operation): " + key + "; Status : " + val.getStatusCode().name()
+ "; Message : " + val.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
if (val.getStatusCode().equals(StatusCode.SUCCESS)) {
rv.set(Boolean.TRUE, val);
appendSuccess = true;
}
}
@Override
public void complete() {
if(appendSuccess) {
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.AOA_OPERATION_APPEND, EVCacheMetricsFactory.WRITE, rv.getStatus(), EVCacheMetricsFactory.YES, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
latch.countDown();
rv.signalComplete();
} else {
Operation opAdd = opFact.store(StoreType.add, key, co.getFlags(), exp, co.getData(), new StoreOperation.Callback() {
@Override
public void receivedStatus(OperationStatus addStatus) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("AddOrAppend Key (Add Operation): " + key + "; Status : " + addStatus.getStatusCode().name()
+ "; Message : " + addStatus.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
if(addStatus.isSuccess()) {
appendSuccess = true;
rv.set(addStatus.isSuccess(), addStatus);
} else {
Operation opReappend = opFact.cat(ConcatenationType.append, 0, key, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus retryAppendStatus) {
if (retryAppendStatus.getStatusCode().equals(StatusCode.SUCCESS)) {
rv.set(Boolean.TRUE, retryAppendStatus);
if (log.isDebugEnabled()) log.debug("AddOrAppend Retry append Key (Append Operation): " + key + "; Status : " + retryAppendStatus.getStatusCode().name()
+ "; Message : " + retryAppendStatus.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
} else {
rv.set(Boolean.FALSE, retryAppendStatus);
}
}
public void complete() {
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.AOA_OPERATION_REAPPEND, EVCacheMetricsFactory.WRITE, rv.getStatus(), EVCacheMetricsFactory.YES, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(opReappend);
mconn.enqueueOperation(key, opReappend);
}
}
@Override
public void gotData(String key, long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
if(appendSuccess) {
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.AOA_OPERATION_ADD, EVCacheMetricsFactory.WRITE, rv.getStatus(), EVCacheMetricsFactory.YES, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
latch.countDown();
rv.signalComplete();
}
}
});
rv.setOperation(opAdd);
mconn.enqueueOperation(key, opAdd);
}
}
});
rv.setOperation(opAppend);
mconn.enqueueOperation(key, opAppend);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
return rv;
}
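// The append -> add -> re-append chain above implements an "append or add"
// upsert: append first (cheap when the key already exists), fall back to add
// when it does not, and retry the append once if a concurrent writer created
// the key between the two steps.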
private Timer getTimer(String operation, String operationType, OperationStatus status, String hit, String host, long maxDuration) {
String name = ((status != null) ? operation + status.getMessage() : operation );
if(hit != null) name = name + hit;
Timer timer = timerMap.get(name);
if(timer != null) return timer;
final List<Tag> tagList = new ArrayList<Tag>(client.getTagList().size() + 4 + (host == null ? 0 : 1));
tagList.addAll(client.getTagList());
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
if(status != null) {
if(status.getStatusCode() == StatusCode.SUCCESS || status.getStatusCode() == StatusCode.ERR_NOT_FOUND || status.getStatusCode() == StatusCode.ERR_EXISTS) {
tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, EVCacheMetricsFactory.SUCCESS));
} else {
tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, EVCacheMetricsFactory.FAIL));
}
tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_STATUS, getStatusCode(status.getStatusCode())));
}
if(hit != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE_HIT, hit));
if(host != null) tagList.add(new BasicTag(EVCacheMetricsFactory.FAILED_HOST, host));
timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.IPC_CALL, tagList, Duration.ofMillis(maxDuration));
timerMap.put(name, timer);
return timer;
}
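// Note (editorial): timers are cached by operation + status message + hit flag
// only, so the host tag captured when a timer is first created is reused for
// every later recording under that name; this trades tag precision for avoiding
// a per-call tag-list allocation.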
private String getStatusCode(StatusCode sc) {
return EVCacheMetricsFactory.getInstance().getStatusCode(sc);
}
private DistributionSummary getDataSizeDistributionSummary(String operation, String type, String metric) {
DistributionSummary distributionSummary = distributionSummaryMap.get(operation);
if(distributionSummary != null) return distributionSummary;
final List<Tag> tagList = new ArrayList<Tag>(6);
tagList.addAll(client.getTagList());
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, type));
distributionSummary = EVCacheMetricsFactory.getInstance().getDistributionSummary(metric, tagList);
distributionSummaryMap.put(operation, distributionSummary);
return distributionSummary;
}
private <T> OperationFuture<Boolean> asyncStore(final StoreType storeType, final String key, int exp, T value, Transcoder<T> tc, EVCacheLatch evcacheLatch) {
final CachedData co;
if (value instanceof CachedData) {
co = (CachedData) value;
} else {
co = tc.encode(value);
}
final CountDownLatch latch = new CountDownLatch(1);
final String operationStr;
if (storeType == StoreType.set) {
operationStr = EVCacheMetricsFactory.SET_OPERATION;
} else if (storeType == StoreType.add) {
operationStr = EVCacheMetricsFactory.ADD_OPERATION;
} else {
operationStr = EVCacheMetricsFactory.REPLACE_OPERATION;
}
if(co != null && co.getData() != null) getDataSizeDistributionSummary(operationStr, EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.IPC_SIZE_OUTBOUND).record(co.getData().length);
final EVCacheOperationFuture<Boolean> rv = new EVCacheOperationFuture<Boolean>(key, latch, new AtomicReference<Boolean>(null), operationTimeout, executorService, client);
final Operation op = opFact.store(storeType, key, co.getFlags(), exp, co.getData(), new StoreOperation.Callback() {
@Override
public void receivedStatus(OperationStatus val) {
if (log.isDebugEnabled()) log.debug("Storing Key : " + key + "; Status : " + val.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "") + "; Message : " + val.getMessage()
+ "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
rv.set(val.isSuccess(), val);
if (log.isTraceEnabled() && !val.getStatusCode().equals(StatusCode.SUCCESS)) log.trace(val.getStatusCode().name() + " storing Key : " + key , new Exception());
}
@Override
public void gotData(String key, long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
latch.countDown();
final String host = (((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) || rv.getStatus().getStatusCode().equals(StatusCode.ERR_NO_MEM)) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(operationStr, EVCacheMetricsFactory.WRITE, rv.getStatus(), null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
if (evcacheLatch != null && evcacheLatch instanceof EVCacheLatchImpl && !client.isInWriteOnly()) ((EVCacheLatchImpl) evcacheLatch).addFuture(rv);
mconn.enqueueOperation(key, op);
return rv;
}
public String toString() {
return appName + "-" + client.getZone() + "-" + client.getId();
}
@SuppressWarnings("unchecked")
public <T> OperationFuture<Boolean> add(String key, int exp, T o, final Transcoder<T> tc, EVCacheLatch latch) {
Transcoder<T> t = (Transcoder<T>) ((tc == null) ? transcoder : tc);
return asyncStore(StoreType.add, key, exp, o, t, latch);
}
public long incr(String key, long by, long def, int exp) {
return mutate(Mutator.incr, key, by, def, exp);
}
public long decr(String key, long by, long def, int exp) {
return mutate(Mutator.decr, key, by, def, exp);
}
public long mutate(final Mutator m, String key, long by, long def, int exp) {
final String operationStr = m.name();
final long start = System.currentTimeMillis();
final AtomicLong rv = new AtomicLong();
final CountDownLatch latch = new CountDownLatch(1);
final List<OperationStatus> statusList = new ArrayList<OperationStatus>(1);
final Operation op = opFact.mutate(m, key, by, def, exp, new OperationCallback() {
@Override
public void receivedStatus(OperationStatus s) {
statusList.add(s);
rv.set(Long.parseLong(s.isSuccess() ? s.getMessage() : "-1"));
}
@Override
public void complete() {
latch.countDown();
}
});
mconn.enqueueOperation(key, op);
long retVal = def;
try {
if(mutateOperationTimeout == null) {
mutateOperationTimeout = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".mutate.timeout", Long.class).orElse(connectionFactory.getOperationTimeout());
}
if (!latch.await(mutateOperationTimeout.get(), TimeUnit.MILLISECONDS)) {
if (log.isDebugEnabled()) log.debug("Mutation operation timeout. Will return -1");
retVal = -1;
} else {
retVal = rv.get();
}
} catch (Exception e) {
log.error("Exception on mutate operation : " + operationStr + " Key : " + key + "; by : " + by + "; default : " + def + "; exp : " + exp
+ "; val : " + retVal + "; Elapsed Time - " + (System.currentTimeMillis() - start), e);
}
final OperationStatus status = statusList.size() > 0 ? statusList.get(0) : null;
final String host = ((status != null && status.getStatusCode().equals(StatusCode.TIMEDOUT) && op != null) ? getHostName(op.getHandlingNode().getSocketAddress()) : null);
getTimer(operationStr, EVCacheMetricsFactory.WRITE, status, null, host, getWriteMetricMaxValue()).record((System.currentTimeMillis() - start), TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug(operationStr + " Key : " + key + "; by : " + by + "; default : " + def + "; exp : " + exp
+ "; val : " + retVal + "; Elapsed Time - " + (System.currentTimeMillis() - start));
return retVal;
}
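// Illustrative sketch, not part of the original client: incr/decr above are thin
// wrappers over mutate(...). The key name and TTL below are hypothetical; a
// return value of -1 signals a timed-out or failed mutation.
private long counterUsageSketch() {
final long hits = incr("page:hits", 1L, 0L /* default if absent */, 3600 /* TTL seconds */);
return hits; // -1 => treat as a failed mutate and fall back accordingly
}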
public void reconnectNode(EVCacheNode evcNode ) {
final long upTime = System.currentTimeMillis() - evcNode.getCreateTime();
if (log.isDebugEnabled()) log.debug("Reconnecting node : " + evcNode + "; UpTime : " + upTime);
if(upTime > 30000) { //not more than once every 30 seconds : TODO make this configurable
final List<Tag> tagList = new ArrayList<Tag>(client.getTagList().size() + 2);
tagList.addAll(client.getTagList());
tagList.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.RECONNECT));
tagList.add(new BasicTag(EVCacheMetricsFactory.FAILED_HOST, evcNode.getHostName()));
EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.CONFIG, tagList);
evcNode.setConnectTime(System.currentTimeMillis());
mconn.queueReconnect(evcNode);
}
}
public int getWriteMetricMaxValue() {
return maxWriteDuration.get().intValue();
}
public int getReadMetricMaxValue() {
return maxReadDuration.get().intValue();
}
private String getHostNameByKey(String key) {
MemcachedNode evcNode = getEVCacheNode(key);
return getHostName(evcNode.getSocketAddress());
}
private String getHostName(SocketAddress sa) {
if (sa == null) return null;
if(sa instanceof InetSocketAddress) {
return ((InetSocketAddress)sa).getHostName();
} else {
return sa.toString();
}
}
public EVCacheItemMetaData metaDebug(String key) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheItemMetaData rv = new EVCacheItemMetaData();
if(opFact instanceof EVCacheAsciiOperationFactory) {
final Operation op = ((EVCacheAsciiOperationFactory)opFact).metaDebug(key, new MetaDebugOperation.Callback() {
public void receivedStatus(OperationStatus status) {
if (!status.isSuccess()) {
if (log.isDebugEnabled()) log.debug("Unsuccessful stat fetch: %s", status);
}
if (log.isDebugEnabled()) log.debug("Getting Meta Debug: " + key + "; Status : " + status.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "") + "; Message : " + status.getMessage());
}
public void debugInfo(String k, String val) {
if (log.isDebugEnabled()) log.debug("key " + k + "; val : " + val);
if(k.equals("exp")) rv.setSecondsLeftToExpire(Long.parseLong(val) * -1);
else if(k.equals("la")) rv.setSecondsSinceLastAccess(Long.parseLong(val));
else if(k.equals("cas")) rv.setCas(Long.parseLong(val));
else if(k.equals("fetch")) rv.setHasBeenFetchedAfterWrite(Boolean.parseBoolean(val));
else if(k.equals("cls")) rv.setSlabClass(Integer.parseInt(val));
else if(k.equals("size")) rv.setSizeInBytes(Integer.parseInt(val));
}
public void complete() {
latch.countDown();
}});
mconn.enqueueOperation(key, op);
try {
if (!latch.await(operationTimeout, TimeUnit.MILLISECONDS)) {
if (log.isDebugEnabled()) log.debug("meta debug operation timeout. Will return empty opbject.");
}
} catch (Exception e) {
log.error("Exception on meta debug operation : Key : " + key, e);
}
if (log.isDebugEnabled()) log.debug("Meta Debug Data : " + rv);
}
return rv;
}
public Map<SocketAddress, String> execCmd(final String cmd, String[] ips) {
final Map<SocketAddress, String> rv = new HashMap<SocketAddress, String>();
Collection<MemcachedNode> nodes = null;
if(ips == null || ips.length == 0) {
nodes = mconn.getLocator().getAll();
} else {
nodes = new ArrayList<MemcachedNode>(ips.length);
for(String ip : ips) {
for(MemcachedNode node : mconn.getLocator().getAll()) {
if(((InetSocketAddress)node.getSocketAddress()).getAddress().getHostAddress().equals(ip)) {
nodes.add(node);
}
}
}
}
if(nodes != null && !nodes.isEmpty()) {
CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
@Override
public Operation newOp(final MemcachedNode n, final CountDownLatch latch) {
final SocketAddress sa = n.getSocketAddress();
return ((EVCacheAsciiOperationFactory)opFact).execCmd(cmd, new ExecCmdOperation.Callback() {
@Override
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("cmd : " + cmd + "; MemcachedNode : " + n + "; Status : " + status);
rv.put(sa, status.getMessage());
}
@Override
public void complete() {
latch.countDown();
}
});
}
}, nodes);
try {
blatch.await(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for stats", e);
}
}
return rv;
}
public <T> EVCacheOperationFuture<EVCacheItem<T>> asyncMetaGet(final String key, final Transcoder<T> tc, EVCacheGetOperationListener<T> listener) {
final CountDownLatch latch = new CountDownLatch(1);
final EVCacheOperationFuture<EVCacheItem<T>> rv = new EVCacheOperationFuture<EVCacheItem<T>>(key, latch, new AtomicReference<EVCacheItem<T>>(null), readTimeout.get().intValue(), executorService, client);
if(opFact instanceof EVCacheAsciiOperationFactory) {
final Operation op = ((EVCacheAsciiOperationFactory)opFact).metaGet(key, new MetaGetOperation.Callback() {
private EVCacheItem<T> evItem = new EVCacheItem<T>();
public void receivedStatus(OperationStatus status) {
if (log.isDebugEnabled()) log.debug("Getting Key : " + key + "; Status : " + status.getStatusCode().name() + (log.isTraceEnabled() ? " Node : " + getEVCacheNode(key) : "")
+ "; Message : " + status.getMessage() + "; Elapsed Time - " + (System.currentTimeMillis() - rv.getStartTime()));
try {
if (evItem.getData() != null) {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val : " + evItem);
rv.set(evItem, status);
} else {
if (log.isTraceEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.trace("Key : " + key + "; val is null");
rv.set(null, status);
}
} catch (Exception e) {
log.error(e.getMessage(), e);
rv.set(null, status);
}
}
@Override
public void gotMetaData(String k, char flag, String fVal) {
if (log.isDebugEnabled()) log.debug("key " + k + "; val : " + fVal + "; flag : " + flag);
if (isWrongKeyReturned(key, k)) return;
switch (flag) {
case 's':
evItem.getItemMetaData().setSizeInBytes(Integer.parseInt(fVal));
break;
case 'c':
evItem.getItemMetaData().setCas(Long.parseLong(fVal));
break;
case 'f':
evItem.setFlag(Integer.parseInt(fVal));
break;
case 'h':
evItem.getItemMetaData().setHasBeenFetchedAfterWrite(fVal.equals("1"));
break;
case 'l':
evItem.getItemMetaData().setSecondsSinceLastAccess(Long.parseLong(fVal));
break;
case 'O':
//opaque = val;
break;
case 't':
final int ttlLeft = Integer.parseInt(fVal);
evItem.getItemMetaData().setSecondsLeftToExpire(ttlLeft);
getDataSizeDistributionSummary(EVCacheMetricsFactory.META_GET_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.INTERNAL_TTL).record(ttlLeft);
break;
default:
break;
}
}
@Override
public void gotData(String k, int flag, byte[] data) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Read data : key " + k + "; flags : " + flag + "; data : " + data);
if (isWrongKeyReturned(key, k)) return;
if (data != null) {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + k + "; val size : " + data.length);
getDataSizeDistributionSummary(EVCacheMetricsFactory.META_GET_OPERATION, EVCacheMetricsFactory.READ, EVCacheMetricsFactory.IPC_SIZE_INBOUND).record(data.length);
if (tc == null) {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
final Transcoder<T> t = (Transcoder<T>) getTranscoder();
final T item = t.decode(new CachedData(flag, data, t.getMaxSize()));
evItem.setData(item);
}
} else {
if (tcService == null) {
log.error("tcService is null, will not be able to decode");
throw new RuntimeException("TranscoderSevice is null. Not able to decode");
} else {
final T item = tc.decode(new CachedData(flag, data, tc.getMaxSize()));
evItem.setData(item);
}
}
} else {
if (log.isDebugEnabled() && client.getPool().getEVCacheClientPoolManager().shouldLog(appName)) log.debug("Key : " + k + "; val is null" );
}
}
public void complete() {
latch.countDown();
final String host = ((rv.getStatus().getStatusCode().equals(StatusCode.TIMEDOUT) && rv.getOperation() != null) ? getHostName(rv.getOperation().getHandlingNode().getSocketAddress()) : null);
getTimer(EVCacheMetricsFactory.META_GET_OPERATION, EVCacheMetricsFactory.READ, rv.getStatus(), (evItem.getData() != null ? EVCacheMetricsFactory.YES : EVCacheMetricsFactory.NO), host, getReadMetricMaxValue()).record((System.currentTimeMillis() - rv.getStartTime()), TimeUnit.MILLISECONDS);
rv.signalComplete();
}
});
rv.setOperation(op);
mconn.enqueueOperation(key, op);
if (log.isDebugEnabled()) log.debug("Meta_Get Data : " + rv);
}
return rv;
}
}
| 737 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheMemcachedNodeROImpl.java
|
package net.spy.memcached;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.util.Collection;
import net.spy.memcached.ops.Operation;
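/**
 * Editorial note: read-only decorator over a {@link MemcachedNode}. Stateless
 * accessors delegate to the wrapped node, while every mutating or
 * connection-level call throws {@link UnsupportedOperationException}.
 */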
public class EVCacheMemcachedNodeROImpl implements MemcachedNode {
private final MemcachedNode root;
public EVCacheMemcachedNodeROImpl(MemcachedNode n) {
super();
root = n;
}
public String toString() {
return root.toString();
}
public void addOp(Operation op) {
throw new UnsupportedOperationException();
}
public void insertOp(Operation op) {
throw new UnsupportedOperationException();
}
public void connected() {
throw new UnsupportedOperationException();
}
public void copyInputQueue() {
throw new UnsupportedOperationException();
}
public void fillWriteBuffer(boolean optimizeGets) {
throw new UnsupportedOperationException();
}
public void fixupOps() {
throw new UnsupportedOperationException();
}
public int getBytesRemainingToWrite() {
return root.getBytesRemainingToWrite();
}
public SocketChannel getChannel() {
throw new UnsupportedOperationException();
}
public Operation getCurrentReadOp() {
throw new UnsupportedOperationException();
}
public Operation getCurrentWriteOp() {
throw new UnsupportedOperationException();
}
public ByteBuffer getRbuf() {
throw new UnsupportedOperationException();
}
public int getReconnectCount() {
return root.getReconnectCount();
}
public int getSelectionOps() {
return root.getSelectionOps();
}
public SelectionKey getSk() {
throw new UnsupportedOperationException();
}
public SocketAddress getSocketAddress() {
return root.getSocketAddress();
}
public ByteBuffer getWbuf() {
throw new UnsupportedOperationException();
}
public boolean hasReadOp() {
return root.hasReadOp();
}
public boolean hasWriteOp() {
return root.hasWriteOp();
}
public boolean isActive() {
return root.isActive();
}
public void reconnecting() {
throw new UnsupportedOperationException();
}
public void registerChannel(SocketChannel ch, SelectionKey selectionKey) {
throw new UnsupportedOperationException();
}
public Operation removeCurrentReadOp() {
throw new UnsupportedOperationException();
}
public Operation removeCurrentWriteOp() {
throw new UnsupportedOperationException();
}
public void setChannel(SocketChannel to) {
throw new UnsupportedOperationException();
}
public void setSk(SelectionKey to) {
throw new UnsupportedOperationException();
}
public void setupResend() {
throw new UnsupportedOperationException();
}
public void transitionWriteItem() {
throw new UnsupportedOperationException();
}
public int writeSome() throws IOException {
throw new UnsupportedOperationException();
}
public Collection<Operation> destroyInputQueue() {
throw new UnsupportedOperationException();
}
public void authComplete() {
throw new UnsupportedOperationException();
}
public void setupForAuth() {
throw new UnsupportedOperationException();
}
public int getContinuousTimeout() {
throw new UnsupportedOperationException();
}
public void setContinuousTimeout(boolean isIncrease) {
throw new UnsupportedOperationException();
}
public boolean isAuthenticated() {
throw new UnsupportedOperationException();
}
public long lastReadDelta() {
throw new UnsupportedOperationException();
}
public void completedRead() {
throw new UnsupportedOperationException();
}
public MemcachedConnection getConnection() {
throw new UnsupportedOperationException();
}
public void setConnection(MemcachedConnection connection) {
throw new UnsupportedOperationException();
}
}
| 738 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/EVCacheNode.java
|
package net.spy.memcached;
import java.util.List;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.Tag;
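/**
 * Editorial note: EVCache-specific extensions to {@link MemcachedNode} — queue
 * introspection, per-node operation counters, lifecycle timestamps, and the
 * monitoring hooks used by the client pool.
 */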
public interface EVCacheNode extends MemcachedNode {
void registerMonitors();
boolean isAvailable(EVCache.Call call);
int getWriteQueueSize();
int getReadQueueSize();
int getInputQueueSize();
long incrOps();
long getNumOfOps();
void flushInputQueue();
long getStartTime();
long getTimeoutStartTime();
void removeMonitoring();
void shutdown();
long getCreateTime();
void setConnectTime(long cTime);
String getAppName();
String getHostName();
ServerGroup getServerGroup();
int getId();
List<Tag> getTags();
int getTotalReconnectCount();
String getSocketChannelLocalAddress();
String getSocketChannelRemoteAddress();
String getConnectTime();
int getContinuousTimeout();
int getReconnectCount();
boolean isActive();
EVCacheClient getEVCacheClient();
}
| 739 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/ExecCmdOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.nio.ByteBuffer;
import java.util.Arrays;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatusCode;
public class ExecCmdOperationImpl extends OperationImpl implements ExecCmdOperation {
private static final OperationStatus OK = new OperationStatus(true, "OK",
StatusCode.SUCCESS);
private static final OperationStatus ERROR = new OperationStatus(true, "ERROR",
StatusCode.ERR_INTERNAL);
private final byte[] cmd;
public ExecCmdOperationImpl(String arg, ExecCmdOperation.Callback c) {
super(c);
this.cmd = (arg + "\r\n").getBytes();
}
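// Sends the command line verbatim and completes on the first "OK" or "ERROR"
// reply; any other response line is ignored, so this is only suitable for
// single-line administrative commands (e.g. "flush_all", editorial example).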
@Override
public void initialize() {
setBuffer(ByteBuffer.wrap(cmd));
}
@Override
public void handleLine(String line) {
if (line.equals("OK")) {
callback.receivedStatus(OK);
transitionState(OperationState.COMPLETE);
} else if (line.equals("ERROR")) {
callback.receivedStatus(ERROR);
transitionState(OperationState.COMPLETE);
}
}
@Override
protected void wasCancelled() {
callback.receivedStatus(CANCELLED);
}
@Override
public String toString() {
return "Cmd: " + Arrays.toString(cmd);
}
}
| 740 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaArithmeticOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.MutatorOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.StatusCode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
/**
* Operation for Meta Arithmetic commands of memcached.
*/
public class MetaArithmeticOperationImpl extends EVCacheOperationImpl implements
MutatorOperation {
private static final Logger log = LoggerFactory.getLogger(MetaArithmeticOperationImpl.class);
private static final OperationStatus NOT_FOUND = new OperationStatus(false,
"NOT_FOUND", StatusCode.ERR_NOT_FOUND);
// TODO : Move to a Builder as we expand this to support better isolation guarantees
// Request
private static final String META_ARITHMETIC_OP = "ma";
private static final String AUTO_CREATE = "N%d";
private static final String MUTATOR_MODE ="M%c";
private static final char INCR = '+';
private static final char DECR = '-';
private static final String DEFAULT = "J%d";
private static final String DELTA = "D%d";
private static final char FLAG_VALUE = 'v';
// Response
private static final String VALUE_RETURN = "VA";
private final Mutator mutator;
private final String key;
private final long amount;
private final long def;
private final int exp;
private boolean readingValue;
public static final int OVERHEAD = 32;
public MetaArithmeticOperationImpl(Mutator m, String k, long amt, long defaultVal,
int expiry, OperationCallback c) {
super(c);
mutator = m;
key = k;
amount = amt;
def = defaultVal;
exp = expiry;
readingValue = false;
}
@Override
public void handleLine(String line) {
log.debug("Result: %s", line);
OperationStatus found = null;
if (line.startsWith(VALUE_RETURN)) {
// TODO : We may need to tokenize this when more flags are supplied to the request.
this.readingValue = true;
// Ask state machine to read the next line which has the response
this.setReadType(OperationReadType.LINE);
return;
} else if (readingValue) {
// TODO : Tokenize if multiple values are in this line, as of now, it's just the result.
found = new OperationStatus(true, line, StatusCode.SUCCESS);
} else {
// TODO: Other NF/NS/EX and also OK are treated as errors, this will change as we extend the meta API
found = NOT_FOUND;
}
getCallback().receivedStatus(found);
transitionState(OperationState.COMPLETE);
}
@Override
public void initialize() {
int size = KeyUtil.getKeyBytes(key).length + OVERHEAD;
ByteBuffer b = ByteBuffer.allocate(size);
setArguments(b, META_ARITHMETIC_OP, key, String.format(AUTO_CREATE, exp),
String.format(MUTATOR_MODE, (mutator == Mutator.incr ? INCR : DECR)),
String.format(DEFAULT,def), String.format(DELTA,amount), FLAG_VALUE);
b.flip();
setBuffer(b);
}
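// For example (editorial note, hypothetical values): incr of key "k" by 2 with
// default 5 and TTL 60 serializes to "ma k N60 M+ J5 D2 v"; a successful reply
// is "VA <len>" followed by a line carrying the resulting number.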
public Collection<String> getKeys() {
return Collections.singleton(key);
}
public long getBy() {
return amount;
}
public long getDefault() {
return def;
}
public int getExpiration() {
return exp;
}
public Mutator getType() {
return mutator;
}
@Override
public String toString() {
return "Cmd: " + mutator.name() + " Key: " + key + " Amount: " + amount +
" Default: " + def + " Expiry: " + exp;
}
}
| 741 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/EVCacheAsciiNodeImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.joda.time.format.ISODateTimeFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.ConnectionFactory;
import net.spy.memcached.EVCacheNode;
import net.spy.memcached.EVCacheNodeMBean;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.protocol.ProxyCallback;
import net.spy.memcached.protocol.TCPMemcachedNodeImpl;
public class EVCacheAsciiNodeImpl extends TCPMemcachedNodeImpl implements EVCacheNodeMBean, EVCacheNode {
private static final Logger log = LoggerFactory.getLogger(EVCacheAsciiNodeImpl.class);
protected long stTime;
protected final String hostName;
protected final BlockingQueue<Operation> readQ;
protected final BlockingQueue<Operation> inputQueue;
protected final EVCacheClient client;
private final AtomicInteger numOps = new AtomicInteger(0);
private long timeoutStartTime;
protected final Counter operationsCounter;
public EVCacheAsciiNodeImpl(SocketAddress sa, SocketChannel c, int bufSize, BlockingQueue<Operation> rq, BlockingQueue<Operation> wq, BlockingQueue<Operation> iq,
long opQueueMaxBlockTimeMillis, boolean waitForAuth, long dt, long at, ConnectionFactory fa, EVCacheClient client, long stTime) {
// ASCII never does auth
super(sa, c, bufSize, rq, wq, iq, opQueueMaxBlockTimeMillis, false, dt, at, fa);
this.client = client;
final String appName = client.getAppName();
this.readQ = rq;
this.inputQueue = iq;
this.hostName = ((InetSocketAddress) getSocketAddress()).getHostName();
this.operationsCounter = client.getOperationCounter();
setConnectTime(stTime);
setupMonitoring(appName);
}
@Override
protected void optimize() {
// make sure there are at least two get operations in a row before
// attempting to optimize them.
if (writeQ.peek() instanceof GetOperation) {
optimizedOp = writeQ.remove();
if (writeQ.peek() instanceof GetOperation) {
OptimizedGetImpl og = new OptimizedGetImpl((GetOperation) optimizedOp);
optimizedOp = og;
while (writeQ.peek() instanceof GetOperation) {
GetOperationImpl o = (GetOperationImpl) writeQ.remove();
if (!o.isCancelled()) {
og.addOperation(o);
}
}
// Initialize the new mega get
optimizedOp.initialize();
assert optimizedOp.getState() == OperationState.WRITE_QUEUED;
ProxyCallback pcb = (ProxyCallback) og.getCallback();
getLogger().debug("Set up %s with %s keys and %s callbacks", this,
pcb.numKeys(), pcb.numCallbacks());
}
}
}
private String getMonitorName(String appName) {
return "com.netflix.evcache:Group=" + appName + ",SubGroup=pool" + ",SubSubGroup=" + client.getServerGroupName()
+ ",SubSubSubGroup=" + client.getId() + ",SubSubSubSubGroup=" + hostName
+ "_" + stTime;
}
private void setupMonitoring(String appName) {
try {
final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(appName));
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
mbeanServer.registerMBean(this, mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e);
}
}
public void registerMonitors() {
}
public boolean isAvailable(EVCache.Call call) {
return isActive();
}
public int getWriteQueueSize() {
return writeQ.size();
}
public int getReadQueueSize() {
return readQ.size();
}
public int getInputQueueSize() {
return inputQueue.size();
}
public long incrOps() {
operationsCounter.increment();
return numOps.incrementAndGet();
}
public long getNumOfOps() {
return numOps.get();
}
public void flushInputQueue() {
inputQueue.clear();
}
public long getStartTime() {
return stTime;
}
public long getTimeoutStartTime() {
return timeoutStartTime;
}
public void removeMonitoring() {
try {
final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(client.getAppName()));
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e);
}
}
public void shutdown() {
removeMonitoring();
writeQ.clear();
readQ.clear();
inputQueue.clear();
}
public long getCreateTime() {
return stTime;
}
public void setConnectTime(long cTime) {
this.stTime = cTime;
}
public String getAppName() {
return client.getAppName();
}
public String getHostName() {
return hostName;
}
public ServerGroup getServerGroup() {
return client.getServerGroup();
}
public int getId() {
return client.getId();
}
public List<Tag> getTags() {
return client.getTagList();
}
public int getTotalReconnectCount() {
return getReconnectCount();
}
@Override
public String getSocketChannelLocalAddress() {
try {
if(getChannel() != null) {
return getChannel().getLocalAddress().toString();
}
} catch (IOException e) {
log.error("Exception", e);
}
return "NULL";
}
@Override
public String getSocketChannelRemoteAddress() {
try {
if(getChannel() != null) {
return getChannel().getRemoteAddress().toString();
}
} catch (IOException e) {
log.error("Exception", e);
}
return "NULL";
}
@Override
public String getConnectTime() {
return ISODateTimeFormat.dateTime().print(stTime);
}
@Override
public EVCacheClient getEVCacheClient() {
return client;
}
}
| 742 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaDebugOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.nio.ByteBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatusCode;
public class MetaDebugOperationImpl extends EVCacheOperationImpl implements MetaDebugOperation {
private static final Logger log = LoggerFactory.getLogger(MetaDebugOperationImpl.class);
private static final OperationStatus END = new OperationStatus(true, "EN", StatusCode.SUCCESS);
private static final int OVERHEAD = 32;
private final MetaDebugOperation.Callback cb;
private final String key;
public MetaDebugOperationImpl(String k, MetaDebugOperation.Callback cb) {
super(cb);
this.key = k;
this.cb = cb;
}
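// A successful "me <key>" response looks like (editorial example, hypothetical
// values): "ME <key> exp=-42 la=3 cas=17 fetch=yes cls=5 size=68". handleLine()
// strips the "ME <key>" prefix and feeds each k=v pair to debugInfo(...).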
@Override
public void handleLine(String line) {
if(log.isDebugEnabled()) log.debug("meta debug of {} returned {}", key, line);
if (line.equals("EN")) {
getCallback().receivedStatus(END);
transitionState(OperationState.COMPLETE);
} else {
String[] parts = line.split(" ", 3);
if(log.isDebugEnabled()) log.debug("Num of parts "+ parts.length);
if(parts.length <= 2) return;
String[] kvPairs = parts[2].split(" ");
for(String kv : kvPairs) {
if(log.isDebugEnabled()) log.debug("kv "+ kv);
String[] tuple = kv.split("=",2);
if(log.isDebugEnabled()) log.debug("{} = {}", tuple[0], tuple[1]);
cb.debugInfo(tuple[0], tuple[1]);
}
}
getCallback().receivedStatus(matchStatus(line, END));
transitionState(OperationState.COMPLETE);
}
@Override
public void initialize() {
ByteBuffer b = ByteBuffer.allocate(KeyUtil.getKeyBytes(key).length + OVERHEAD);
setArguments(b, "me", key);
b.flip();
setBuffer(b);
}
@Override
public String toString() {
return "Cmd: me Key: " + key;
}
}
| 743 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/ExecCmdOperation.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
public interface ExecCmdOperation extends Operation {
/**
* Callback for cmd operation.
*/
interface Callback extends OperationCallback {
}
}
| 744 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/EVCacheOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.OperationCallback;
public class EVCacheOperationImpl extends OperationImpl {
protected EVCacheOperationImpl(OperationCallback cb) {
super(cb);
}
@Override
public void handleLine(String line) {
// No-op: concrete EVCache ascii operations override handleLine.
}
@Override
public void initialize() {
// No-op: concrete EVCache ascii operations override initialize.
}
}
| 745 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaGetOperationImpl.java
|
package net.spy.memcached.protocol.ascii;
import java.nio.ByteBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatusCode;
public class MetaGetOperationImpl extends EVCacheOperationImpl implements MetaGetOperation {
private static final Logger log = LoggerFactory.getLogger(MetaGetOperationImpl.class);
private static final OperationStatus END = new OperationStatus(true, "EN", StatusCode.SUCCESS);
private static final int OVERHEAD = 32;
private final MetaGetOperation.Callback cb;
private final String key;
private int currentFlag = -1;
private byte[] data = null;
private int readOffset = 0;
private byte lookingFor = '\0';
public MetaGetOperationImpl(String k, MetaGetOperation.Callback cb) {
super(cb);
this.key = k;
this.cb = cb;
}
@Override
public void handleLine(String line) {
if(log.isDebugEnabled()) log.debug("meta get of {} returned {}", key, line);
if (line.length() == 0 || line.equals("EN")) {
getCallback().receivedStatus(END);
transitionState(OperationState.COMPLETE);
} else if (line.startsWith("VA")) {
String[] parts = line.split(" ");
if(log.isDebugEnabled()) log.debug("Num of parts "+ parts.length);
if(parts.length <= 2) return;
int size = Integer.parseInt(parts[1]);
if(log.isDebugEnabled()) log.debug("Size of value in bytes : "+ size);
data = new byte[size];
for(int i = 2; i < parts.length; i++) {
final char flag = parts[i].charAt(0);
final String val = parts[i].substring(1);
if(log.isDebugEnabled()) log.debug("flag="+ flag + "; Val=" + val);
cb.gotMetaData(key, flag, val);
if(flag == 'f') currentFlag = Integer.parseInt(val);
}
setReadType(OperationReadType.DATA);
}
}
public void handleRead(ByteBuffer b) {
if(log.isDebugEnabled()) log.debug("readOffset: {}, length: {}", readOffset, data.length);
// If we're not looking for termination, we're still looking for data
if (lookingFor == '\0') {
int toRead = data.length - readOffset;
int available = b.remaining();
toRead = Math.min(toRead, available);
if(log.isDebugEnabled()) log.debug("Reading {} bytes", toRead);
b.get(data, readOffset, toRead);
readOffset += toRead;
}
// Transition us into a ``looking for \r\n'' kind of state if we've
// read enough and are still in a data state.
if (readOffset == data.length && lookingFor == '\0') {
// Hand the fully read payload to the meta-get callback.
OperationCallback cb = getCallback();
if (cb instanceof MetaGetOperation.Callback) {
MetaGetOperation.Callback mgcb = (MetaGetOperation.Callback) cb;
mgcb.gotData(key, currentFlag, data);
}
lookingFor = '\r';
}
// If we're looking for an ending byte, let's go find it.
if (lookingFor != '\0' && b.hasRemaining()) {
do {
byte tmp = b.get();
assert tmp == lookingFor : "Expecting " + lookingFor + ", got "
+ (char) tmp;
switch (lookingFor) {
case '\r':
lookingFor = '\n';
break;
case '\n':
lookingFor = '\0';
break;
default:
assert false : "Looking for unexpected char: " + (char) lookingFor;
}
} while (lookingFor != '\0' && b.hasRemaining());
// Completed the read, reset stuff.
if (lookingFor == '\0') {
data = null;
readOffset = 0;
currentFlag = -1;
getCallback().receivedStatus(END);
transitionState(OperationState.COMPLETE);
getLogger().debug("Setting read type back to line.");
setReadType(OperationReadType.LINE);
}
}
}
@Override
public void initialize() {
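// Requested meta flags (editorial note): s=item size, f=client flags,
// t=remaining TTL, h=hit-since-write, l=seconds since last access,
// c=CAS value, v=also return the value block.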
final String flags = "s f t h l c v";
final ByteBuffer b = ByteBuffer.allocate(KeyUtil.getKeyBytes(key).length + flags.length() + OVERHEAD);
setArguments(b, "mg", key, flags);
b.flip();
setBuffer(b);
}
@Override
public String toString() {
return "Cmd: me Key: " + key;
}
}
| 746 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaGetOperation.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
public interface MetaGetOperation extends Operation {
/**
* Operation callback for the meta get request.
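*
* <p>A minimal sketch of a handler (hypothetical, not part of this library):
* <pre>{@code
* MetaGetOperation.Callback cb = new MetaGetOperation.Callback() {
*     public void receivedStatus(OperationStatus status) { }
*     public void complete() { }
*     public void gotMetaData(String key, char flag, String data) {
*         if (flag == 't') System.out.println("ttl=" + data);
*     }
*     public void gotData(String key, int flag, byte[] data) {
*         System.out.println(key + " -> " + data.length + " bytes");
*     }
* };
* }</pre>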
*/
public interface Callback extends OperationCallback {
/**
* Callback invoked once for each meta flag returned for the key.
*
* @param key the key that was retrieved
* @param flag the meta flag character (e.g. 'f', 's' or 't')
* @param data the value associated with the flag
*/
void gotMetaData(String key, char flag, String data);
/**
* Callback for result from a get.
*
* @param key the key that was retrieved
* @param flag the flag for this value
* @param data the data stored under this key
*/
void gotData(String key, int flag, byte[] data);
}
}
| 747 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/ascii/MetaDebugOperation.java
|
package net.spy.memcached.protocol.ascii;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
public interface MetaDebugOperation extends Operation {
/**
* Operation callback for the meta debug request.
*/
public interface Callback extends OperationCallback {
/**
* Callback for each line of debug information returned for a key.
*
* @param key the key that was debugged
* @param val the debug information returned for the key
*/
void debugInfo(String key, String val);
}
}
| 748 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol
|
Create_ds/EVCache/evcache-core/src/main/java/net/spy/memcached/protocol/binary/EVCacheNodeImpl.java
|
package net.spy.memcached.protocol.binary;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.joda.time.format.ISODateTimeFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCache;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.ConnectionFactory;
import net.spy.memcached.EVCacheNode;
import net.spy.memcached.EVCacheNodeMBean;
import net.spy.memcached.ops.Operation;
//import sun.misc.Cleaner;
//import sun.nio.ch.DirectBuffer;
@SuppressWarnings("restriction")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "FCBL_FIELD_COULD_BE_LOCAL", "EXS_EXCEPTION_SOFTENING_NO_CHECKED",
"REC_CATCH_EXCEPTION", "SCII_SPOILED_CHILD_INTERFACE_IMPLEMENTATOR" })
public class EVCacheNodeImpl extends BinaryMemcachedNodeImpl implements EVCacheNodeMBean, EVCacheNode {
private static final Logger log = LoggerFactory.getLogger(EVCacheNodeImpl.class);
protected long stTime;
protected final String hostName;
protected final BlockingQueue<Operation> readQ;
protected final BlockingQueue<Operation> inputQueue;
protected final EVCacheClient client;
//protected Counter reconnectCounter;
private final AtomicInteger numOps = new AtomicInteger(0);
private long timeoutStartTime;
protected final Counter operationsCounter;
public EVCacheNodeImpl(SocketAddress sa, SocketChannel c, int bufSize, BlockingQueue<Operation> rq, BlockingQueue<Operation> wq, BlockingQueue<Operation> iq,
long opQueueMaxBlockTimeMillis, boolean waitForAuth, long dt, long at, ConnectionFactory fa, EVCacheClient client, long stTime) {
super(sa, c, bufSize, rq, wq, iq, Long.valueOf(opQueueMaxBlockTimeMillis), waitForAuth, dt, at, fa);
this.client = client;
final String appName = client.getAppName();
this.readQ = rq;
this.inputQueue = iq;
this.hostName = ((InetSocketAddress) getSocketAddress()).getHostName();
// final List<Tag> tagsCounter = new ArrayList<Tag>(5);
// tagsCounter.add(new BasicTag(EVCacheMetricsFactory.CACHE, client.getAppName()));
// tagsCounter.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName()));
// tagsCounter.add(new BasicTag(EVCacheMetricsFactory.ZONE, client.getZone()));
//tagsCounter.add(new BasicTag(EVCacheMetricsFactory.HOST, hostName)); //TODO : enable this and see what is the impact
this.operationsCounter = client.getOperationCounter();
setConnectTime(stTime);
setupMonitoring(appName);
}
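// Example of the resulting JMX ObjectName (values are illustrative only):
// com.netflix.evcache:Group=MYAPP,SubGroup=pool,SubSubGroup=myapp-v001,SubSubSubGroup=1,SubSubSubSubGroup=ec2-host_1700000000000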
private String getMonitorName(String appName) {
return "com.netflix.evcache:Group=" + appName + ",SubGroup=pool" + ",SubSubGroup=" + client.getServerGroupName()
+ ",SubSubSubGroup=" + client.getId() + ",SubSubSubSubGroup=" + hostName
+ "_" + stTime;
}
private void setupMonitoring(String appName) {
try {
final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(appName));
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
mbeanServer.registerMBean(this, mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e);
}
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#registerMonitors()
*/
@Override
public void registerMonitors() {
// try {
// EVCacheMetricsFactory.getInstance().getRegistry().register(this);
// } catch (Exception e) {
// if (log.isWarnEnabled()) log.warn("Exception while registering.", e);
// }
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#isAvailable(com.netflix.evcache.EVCache.Call)
*/
@Override
public boolean isAvailable(EVCache.Call call) {
return isActive();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getWriteQueueSize()
*/
@Override
public int getWriteQueueSize() {
return writeQ.size();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getReadQueueSize()
*/
@Override
public int getReadQueueSize() {
return readQ.size();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getInputQueueSize()
*/
@Override
public int getInputQueueSize() {
return inputQueue.size();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#incrOps()
*/
@Override
public long incrOps() {
operationsCounter.increment();
return numOps.incrementAndGet();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getNumOfOps()
*/
@Override
public long getNumOfOps() {
return numOps.get();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#flushInputQueue()
*/
@Override
public void flushInputQueue() {
inputQueue.clear();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getStartTime()
*/
@Override
public long getStartTime() {
return stTime;
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getTimeoutStartTime()
*/
@Override
public long getTimeoutStartTime() {
return timeoutStartTime;
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#removeMonitoring()
*/
@Override
public void removeMonitoring() {
try {
final ObjectName mBeanName = ObjectName.getInstance(getMonitorName(client.getAppName()));
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception while setting up the monitoring.", e);
}
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#shutdown()
*/
@Override
public void shutdown() {
removeMonitoring();
writeQ.clear();
readQ.clear();
inputQueue.clear();
try {
// Cleanup the ByteBuffers only if they are sun.nio.ch.DirectBuffer
// If we don't cleanup then we will leak 16K of memory
// if (getRbuf() instanceof DirectBuffer) {
// Cleaner cleaner = ((DirectBuffer) getRbuf()).cleaner();
// if (cleaner != null) cleaner.clean();
// cleaner = ((DirectBuffer) getWbuf()).cleaner();
// if (cleaner != null) cleaner.clean();
// }
} catch (Throwable t) {
getLogger().error("Exception cleaning ByteBuffer.", t);
}
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getCreateTime()
*/
@Override
public long getCreateTime() {
return stTime;
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#setConnectTime(long)
*/
@Override
public void setConnectTime(long cTime) {
this.stTime = cTime;
// if(reconnectCounter == null) {
// final List<Tag> tags = new ArrayList<Tag>(5);
// tags.add(new BasicTag(EVCacheMetricsFactory.CACHE, client.getAppName()));
// tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName()));
// tags.add(new BasicTag(EVCacheMetricsFactory.ZONE, client.getZone()));
// tags.add(new BasicTag(EVCacheMetricsFactory.HOST, hostName));
// tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, EVCacheMetricsFactory.RECONNECT));
// this.reconnectCounter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_RECONNECT, tags);
//
// }
// reconnectCounter.increment();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getAppName()
*/
@Override
public String getAppName() {
return client.getAppName();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getHostName()
*/
@Override
public String getHostName() {
return hostName;
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getServerGroup()
*/
@Override
public ServerGroup getServerGroup() {
return client.getServerGroup();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getId()
*/
@Override
public int getId() {
return client.getId();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getTags()
*/
@Override
public List<Tag> getTags() {
return client.getTagList();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getTotalReconnectCount()
*/
@Override
public int getTotalReconnectCount() {
// if(reconnectCounter == null) return 0;
// return (int)reconnectCounter.count();
return getReconnectCount();
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getSocketChannelLocalAddress()
*/
@Override
public String getSocketChannelLocalAddress() {
try {
if(getChannel() != null) {
return getChannel().getLocalAddress().toString();
}
} catch (IOException e) {
log.error("Exception", e);
}
return "NULL";
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getSocketChannelRemoteAddress()
*/
@Override
public String getSocketChannelRemoteAddress() {
try {
if(getChannel() != null) {
return getChannel().getRemoteAddress().toString();
}
} catch (IOException e) {
log.error("Exception", e);
}
return "NULL";
}
/* (non-Javadoc)
* @see net.spy.memcached.protocol.binary.EVCacheNode1#getConnectTime()
*/
@Override
public String getConnectTime() {
return ISODateTimeFormat.dateTime().print(stTime);
}
@Override
public EVCacheClient getEVCacheClient() {
return client;
}
}
| 749 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheImpl.java
|
package com.netflix.evcache;
import static com.netflix.evcache.util.Sneaky.sneakyThrow;
import java.lang.management.ManagementFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.stream.Collectors;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import com.netflix.evcache.dto.KeyMapDto;
import com.netflix.evcache.util.EVCacheBulkDataDto;
import com.netflix.evcache.util.KeyHasher;
import com.netflix.evcache.util.RetryCount;
import com.netflix.evcache.util.Sneaky;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.EVCacheInMemoryCache.DataNotFoundException;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.operation.EVCacheFuture;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.operation.EVCacheLatchImpl;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import com.netflix.evcache.pool.ChunkTranscoder;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheClientUtil;
import com.netflix.evcache.pool.EVCacheValue;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.Transcoder;
import rx.Observable;
import rx.Scheduler;
import rx.Single;
/**
* An implementation of an ephemeral volatile cache.
*
* @author smadappa
* @version 2.0
*/
@SuppressWarnings("unchecked")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "WMI_WRONG_MAP_ITERATOR",
"DB_DUPLICATE_BRANCHES", "REC_CATCH_EXCEPTION","RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" })
public class EVCacheImpl implements EVCache, EVCacheImplMBean {
private static final Logger log = LoggerFactory.getLogger(EVCacheImpl.class);
private final String _appName;
private final String _cacheName;
private final String _metricPrefix;
protected final Transcoder<?> _transcoder;
private final boolean _zoneFallback;
private final boolean _throwException;
private final int _timeToLive; // defaults to 15 minutes
protected EVCacheClientPool _pool;
private final Property<Boolean> _throwExceptionFP, _zoneFallbackFP, _useInMemoryCache;
private final Property<Boolean> _bulkZoneFallbackFP;
private final Property<Boolean> _bulkPartialZoneFallbackFP;
private final List<Tag> tags;
private EVCacheInMemoryCache<?> cache;
private EVCacheClientUtil clientUtil = null;
private final Property<Boolean> ignoreTouch;
private final Property<Boolean> hashKey;
private final Property<String> hashingAlgo;
private final Property<Boolean> shouldEncodeHashKey;
private final Property<Integer> maxDigestBytes;
private final Property<Integer> maxHashLength;
private final EVCacheTranscoder evcacheValueTranscoder;
private final Property<Integer> maxReadDuration, maxWriteDuration;
protected final EVCacheClientPoolManager _poolManager;
private final Map<String, Timer> timerMap = new ConcurrentHashMap<String, Timer>();
private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>();
private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Property<Boolean> _eventsUsingLatchFP, autoHashKeys;
private DistributionSummary bulkKeysSize = null;
private final Property<Integer> maxKeyLength;
private final Property<String> alias;
private final Property<String> encoderBase;
EVCacheImpl(String appName, String cacheName, int timeToLive, Transcoder<?> transcoder, boolean enableZoneFallback,
boolean throwException, EVCacheClientPoolManager poolManager) {
this._appName = appName;
this._cacheName = cacheName;
if(_cacheName != null && _cacheName.length() > 0) {
for(int i = 0; i < cacheName.length(); i++) {
if(Character.isWhitespace(cacheName.charAt(i))){
throw new IllegalArgumentException("Cache Prefix ``" + cacheName + "`` contains invalid character at position " + i );
}
}
}
this._timeToLive = timeToLive;
this._transcoder = transcoder;
this._zoneFallback = enableZoneFallback;
this._throwException = throwException;
tags = new ArrayList<Tag>(3);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
if(_cacheName != null && _cacheName.length() > 0) tags.add(new BasicTag(EVCacheMetricsFactory.PREFIX, _cacheName));
final String _metricName = (_cacheName == null) ? _appName : _appName + "." + _cacheName;
_metricPrefix = _appName + "-";
this._poolManager = poolManager;
this._pool = poolManager.getEVCacheClientPool(_appName);
final PropertyRepository propertyRepository = poolManager.getEVCacheConfig().getPropertyRepository();
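// Property lookups fall back from the most specific name to broader ones, e.g.
// (illustrative names) "MYAPP.mycache.throw.exception" -> "MYAPP.throw.exception" -> default false.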
_throwExceptionFP = propertyRepository.get(_metricName + ".throw.exception", Boolean.class).orElseGet(_appName + ".throw.exception").orElse(false);
_zoneFallbackFP = propertyRepository.get(_metricName + ".fallback.zone", Boolean.class).orElseGet(_appName + ".fallback.zone").orElse(true);
_bulkZoneFallbackFP = propertyRepository.get(_appName + ".bulk.fallback.zone", Boolean.class).orElse(true);
_bulkPartialZoneFallbackFP = propertyRepository.get(_appName+ ".bulk.partial.fallback.zone", Boolean.class).orElse(true);
if(_cacheName == null) {
_useInMemoryCache = propertyRepository.get(_appName + ".use.inmemory.cache", Boolean.class).orElseGet("evcache.use.inmemory.cache").orElse(false);
} else {
_useInMemoryCache = propertyRepository.get(_appName + "." + _cacheName + ".use.inmemory.cache", Boolean.class).orElseGet(_appName + ".use.inmemory.cache").orElseGet("evcache.use.inmemory.cache").orElse(false);
}
_eventsUsingLatchFP = propertyRepository.get(_appName + ".events.using.latch", Boolean.class).orElseGet("evcache.events.using.latch").orElse(false);
maxReadDuration = propertyRepository.get(_appName + ".max.read.duration.metric", Integer.class).orElseGet("evcache.max.read.duration.metric").orElse(20);
maxWriteDuration = propertyRepository.get(_appName + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50);
ignoreTouch = propertyRepository.get(appName + ".ignore.touch", Boolean.class).orElse(false);
this.hashKey = propertyRepository.get(appName + ".hash.key", Boolean.class).orElse(false);
this.hashingAlgo = propertyRepository.get(appName + ".hash.algo", String.class).orElse("siphash24");
this.shouldEncodeHashKey = propertyRepository.get(appName + ".hash.encode", Boolean.class).orElse(true);
this.maxDigestBytes = propertyRepository.get(appName + ".max.digest.bytes", Integer.class).orElse(-1);
this.maxHashLength = propertyRepository.get(appName + ".max.hash.length", Integer.class).orElse(-1);
this.encoderBase = propertyRepository.get(appName + ".hash.encoder", String.class).orElse("base64");
this.autoHashKeys = propertyRepository.get(_appName + ".auto.hash.keys", Boolean.class).orElseGet("evcache.auto.hash.keys").orElse(false);
this.evcacheValueTranscoder = new EVCacheTranscoder();
evcacheValueTranscoder.setCompressionThreshold(Integer.MAX_VALUE);
// default max key length is 200, instead of using what is defined in MemcachedClientIF.MAX_KEY_LENGTH (250). This is to accommodate
// auto key prepend with appname for duet feature.
this.maxKeyLength = propertyRepository.get(_appName + ".max.key.length", Integer.class).orElseGet("evcache.max.key.length").orElse(200);
// if alias changes, refresh my pool to point to the correct alias app
this.alias = propertyRepository.get("EVCacheClientPoolManager." + appName + ".alias", String.class);
this.alias.subscribe(i -> {
this._pool = poolManager.getEVCacheClientPool(_appName);
});
_pool.pingServers();
setupMonitoring();
}
private void setupMonitoring() {
try {
final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + _appName
+ ",SubGroup=Impl");
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
mbeanServer.registerMBean(this, mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception", e);
}
}
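/**
* Builds the canonical key by prefixing the cache name when one is configured;
* e.g. (hypothetical values) a cache prefix "cid" and key "12345" yield "cid:12345".
* Keys longer than max.key.length are rejected unless hashing is enabled, in which
* case the canonical key is hashed with the configured algorithm instead.
*/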
EVCacheKey getEVCacheKey(final String key) {
if(key == null || key.length() == 0) throw new NullPointerException("Key cannot be null or empty");
for(int i = 0; i < key.length(); i++) {
if(Character.isWhitespace(key.charAt(i))){
throw new IllegalArgumentException("key ``" + key + "`` contains invalid character at position " + i );
}
}
final String canonicalKey;
if (this._cacheName == null) {
canonicalKey = key;
} else {
final int keyLength = _cacheName.length() + 1 + key.length();
canonicalKey = new StringBuilder(keyLength).append(_cacheName).append(':').append(key).toString();
}
if (canonicalKey.length() > this.maxKeyLength.get() && !hashKey.get() && !autoHashKeys.get()) {
throw new IllegalArgumentException("Key is too long (maxlen = " + this.maxKeyLength.get() + ')');
}
boolean shouldHashKeyAtAppLevel = hashKey.get() || (canonicalKey.length() > this.maxKeyLength.get() && autoHashKeys.get());
final EVCacheKey evcKey = new EVCacheKey(_appName, key, canonicalKey, shouldHashKeyAtAppLevel ? KeyHasher.getHashingAlgorithmFromString(hashingAlgo.get()) : null, this.shouldEncodeHashKey, this.maxDigestBytes, this.maxHashLength, this.encoderBase.get());
if (log.isDebugEnabled() && shouldLog()) log.debug("Key : " + key + "; EVCacheKey : " + evcKey);
return evcKey;
}
private boolean hasZoneFallbackForBulk() {
if (!_pool.supportsFallback()) return false;
if (!_bulkZoneFallbackFP.get()) return false;
return _zoneFallback;
}
private boolean hasZoneFallback() {
if (!_pool.supportsFallback()) return false;
if (!_zoneFallbackFP.get().booleanValue()) return false;
return _zoneFallback;
}
private boolean shouldLog() {
return _poolManager.shouldLog(_appName);
}
private boolean doThrowException() {
return (_throwException || _throwExceptionFP.get().booleanValue());
}
private List<EVCacheEventListener> getEVCacheEventListeners() {
return _poolManager.getEVCacheEventListeners();
}
private EVCacheEvent createEVCacheEvent(Collection<EVCacheClient> clients, Call call) {
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
if (evcacheEventListenerList == null || evcacheEventListenerList.size() == 0) return null;
final EVCacheEvent event = new EVCacheEvent(call, _appName, _cacheName, _pool);
event.setClients(clients);
return event;
}
private boolean shouldThrottle(EVCacheEvent event) {
for (EVCacheEventListener evcacheEventListener : getEVCacheEventListeners()) {
try {
if (evcacheEventListener.onThrottle(event)) {
return true;
}
} catch(Exception e) {
incrementEventFailure("throttle", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing throttle event on listener " + evcacheEventListener + " for event " + event, e);
}
}
return false;
}
private void startEvent(EVCacheEvent event) {
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
try {
evcacheEventListener.onStart(event);
} catch(Exception e) {
incrementEventFailure("start", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing start event on listener " + evcacheEventListener + " for event " + event, e);
}
}
}
private void endEvent(EVCacheEvent event) {
event.setEndTime(System.currentTimeMillis());
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
try {
evcacheEventListener.onComplete(event);
} catch(Exception e) {
incrementEventFailure("end", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing end event on listener " + evcacheEventListener + " for event " + event, e);
}
}
}
private void eventError(EVCacheEvent event, Throwable t) {
event.setEndTime(System.currentTimeMillis());
final List<EVCacheEventListener> evcacheEventListenerList = getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
try {
evcacheEventListener.onError(event, t);
} catch(Exception e) {
incrementEventFailure("error", event.getCall(), evcacheEventListener.getClass().getName());
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing error event on listener " + evcacheEventListener + " for event " + event, e);
}
}
}
private <T> EVCacheInMemoryCache<T> getInMemoryCache(Transcoder<T> tc) {
if (cache == null) cache = _poolManager.createInMemoryCache(tc, this);
return (EVCacheInMemoryCache<T>) cache;
}
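/**
* Usage sketch (hypothetical app and key; assumes the client pool is configured
* and that EVCache.Builder is used to obtain the instance):
* <pre>{@code
* EVCache cache = new EVCache.Builder().setAppName("EVCACHE_APP").build();
* String value = cache.get("key1"); // checks the in-memory tier first, if enabled
* }</pre>
*/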
public <T> T get(String key) throws EVCacheException {
return this.get(key, (Transcoder<T>) _transcoder);
}
private void incrementFastFail(String metric, Call call) {
final String name = metric + call.name();
Counter counter = counterMap.get(name);
if(counter == null) {
final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3);
tagList.addAll(tags);
if(call != null) {
final String operation = call.name();
final String operationType;
switch(call) {
case GET:
case GET_AND_TOUCH:
case GETL:
case BULK:
case COMPLETABLE_FUTURE_GET:
case COMPLETABLE_FUTURE_GET_BULK:
case ASYNC_GET:
operationType = EVCacheMetricsFactory.READ;
break;
default :
operationType = EVCacheMetricsFactory.WRITE;
}
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
}
tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.FAST_FAIL, tagList);
counterMap.put(name, counter);
}
counter.increment();
}
private void incrementEventFailure(String metric, Call call, String event) {
final String name = metric + call.name() + event;
Counter counter = counterMap.get(name);
if(counter == null) {
final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3);
tagList.addAll(tags);
if(call != null) {
final String operation = call.name();
final String operationType;
switch(call) {
case GET:
case GET_AND_TOUCH:
case GETL:
case BULK:
case ASYNC_GET:
operationType = EVCacheMetricsFactory.READ;
break;
default :
operationType = EVCacheMetricsFactory.WRITE;
}
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
}
tagList.add(new BasicTag(EVCacheMetricsFactory.EVENT_STAGE, metric));
tagList.add(new BasicTag(EVCacheMetricsFactory.EVENT, event));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_EVENT_FAIL, tagList);
counterMap.put(name, counter);
}
counter.increment();
}
private void incrementFailure(String metric, String operation, String operationType) {
final String name = metric + operation;
Counter counter = counterMap.get(name);
if(counter == null) {
final List<Tag> tagList = new ArrayList<Tag>(tags.size() + 3);
tagList.addAll(tags);
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
tagList.add(new BasicTag(EVCacheMetricsFactory.FAILURE_REASON, metric));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.INTERNAL_FAIL, tagList);
counterMap.put(name, counter);
}
counter.increment();
}
public <T> T get(String key, Transcoder<T> tc) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final EVCacheKey evcKey = getEVCacheKey(key);
if (_useInMemoryCache.get()) {
T value = null;
try {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
value = (T) getInMemoryCache(transcoder).get(evcKey);
} catch (ExecutionException e) {
final boolean throwExc = doThrowException();
if(throwExc) {
if(e.getCause() instanceof DataNotFoundException) {
return null;
}
if(e.getCause() instanceof EVCacheException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
throw (EVCacheException)e.getCause();
}
throw new EVCacheException("ExecutionException", e);
}
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
if (value != null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
return value;
} else {
if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in inmemory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
}
}
return doGet(evcKey, tc);
}
<T> T doGet(EVCacheKey evcKey , Transcoder<T> tc) throws EVCacheException {
final boolean throwExc = doThrowException();
EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
if (throwExc) throw new EVCacheException("Could not find a client to get the data APP " + _appName);
return null; // Fast failure
}
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
String cacheOperation = EVCacheMetricsFactory.YES;
int tries = 1;
try {
final boolean hasZF = hasZoneFallback();
boolean throwEx = hasZF ? false : throwExc;
T data = getData(client, evcKey, tc, throwEx, hasZF);
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
for (int i = 0; i < fbClients.size(); i++) {
final EVCacheClient fbClient = fbClients.get(i);
if(i >= fbClients.size() - 1) throwEx = throwExc;
if (event != null) {
try {
if (shouldThrottle(event)) {
status = EVCacheMetricsFactory.THROTTLED;
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
status = EVCacheMetricsFactory.THROTTLED;
return null;
}
}
tries++;
data = getData(fbClient, evcKey, tc, throwEx, i < fbClients.size() - 1);
if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
if (data != null) {
client = fbClient;
break;
}
}
}
}
if (data != null) {
if (event != null) event.setAttribute("status", "GHIT");
} else {
cacheOperation = EVCacheMetricsFactory.NO;
if (event != null) event.setAttribute("status", "GMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("GET : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
if (event != null) endEvent(event);
return data;
} catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
status = EVCacheMetricsFactory.TIMEOUT;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("CheckedOperationTimeoutException getting data for APP " + _appName + ", key = " + evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName
+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("Exception getting data for APP " + _appName + ", key = " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.GET.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + duration + " milliSec.");
}
}
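/**
* Async usage sketch (hypothetical key; assumes a transcoder compatible with the
* stored value):
* <pre>{@code
* CompletableFuture<String> future = cache.getAsync("key1");
* future.thenAccept(v -> System.out.println("value = " + v));
* }</pre>
*/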
public <T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final EVCacheKey evcKey = getEVCacheKey(key);
return getAsyncInMemory(evcKey, tc)
.thenCompose(data -> data == null
? doAsyncGet(evcKey, tc)
: CompletableFuture.completedFuture(data));
}
public <T> CompletableFuture<T> getAsync(String key) {
return this.getAsync(key, (Transcoder<T>) _transcoder);
}
private <T> T getInMemory(EVCacheKey evcKey, Transcoder<T> tc) throws Exception {
if (_useInMemoryCache.get()) {
try {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
T value = getInMemoryCache(transcoder).get(evcKey);
if (value != null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from in-memory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
return value;
} else {
if (log.isInfoEnabled() && shouldLog()) log.info("Value not_found in in-memory cache for APP " + _appName + ", key : " + evcKey + "; value : " + value );
}
} catch (Exception e) {
return handleInMemoryException(e);
}
}
return null;
}
private <T> CompletableFuture<T> getAsyncInMemory(EVCacheKey evcKey, Transcoder<T> tc) {
CompletableFuture<T> promise = new CompletableFuture<>();
try {
if(log.isDebugEnabled() && shouldLog()) {
log.debug("Retrieving value from memory {} ", evcKey.getKey());
}
T t = getInMemory(evcKey, tc);
promise.complete(t);
} catch (Exception ex) {
promise.completeExceptionally(ex);
}
return promise;
}
private <T> T handleInMemoryException(Exception e) throws Exception {
final boolean throwExc = doThrowException();
if(throwExc) {
if(e.getCause() instanceof DataNotFoundException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("DataNotFoundException while getting data from InMemory Cache", e);
return null;
}
if(e.getCause() instanceof EVCacheException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data from InMemory Cache", e);
throw e;
} else {
throw new EVCacheException("ExecutionException", e);
}
} else {
if (log.isDebugEnabled() && shouldLog()) log.debug("Throws Exception is false and returning null in this case");
return null;
}
}
private <T> CompletableFuture<T> doAsyncGet(EVCacheKey evcKey, Transcoder<T> tc) {
CompletableFuture<T> errorFuture = new CompletableFuture<>();
final boolean throwExc = doThrowException();
//Building the client
EVCacheClient client = buildEvCacheClient(throwExc, Call.COMPLETABLE_FUTURE_GET, errorFuture);
if (errorFuture.isCompletedExceptionally() || client == null) {
if (client == null ) {
if (log.isDebugEnabled() && shouldLog()) log.debug("client is null");
errorFuture.complete(null);
}
return errorFuture;
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Completed Building the client");
//Building the start event
EVCacheEvent event = buildAndStartEvent(client,
Collections.singletonList(evcKey),
throwExc,
errorFuture,
Call.COMPLETABLE_FUTURE_GET);
if (errorFuture.isCompletedExceptionally()) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Error while building and starting the event");
return errorFuture;
}
errorFuture.cancel(false);
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
StringBuilder status = new StringBuilder(EVCacheMetricsFactory.SUCCESS);
StringBuilder cacheOperation = new StringBuilder(EVCacheMetricsFactory.YES);
final boolean hasZF = hasZoneFallback();
RetryCount retryCount = new RetryCount();
boolean throwEx = !hasZF && throwExc;
return getAsyncData(client, evcKey, tc)
.thenCompose(data -> handleRetry(data, evcKey, tc, client, hasZF, throwExc, event, retryCount))
.handle((data, ex) -> {
if (ex != null) {
handleMissData(event, evcKey, client, cacheOperation);
handleFinally(data, status, retryCount.get(), client, cacheOperation, start, Call.COMPLETABLE_FUTURE_GET);
handleException(ex, event);
if (throwEx) {
throw new RuntimeException(ex);
} else {
return null;
}
} else {
handleFinally(data, status, retryCount.get(), client, cacheOperation, start, Call.COMPLETABLE_FUTURE_GET);
handleData(data, event, evcKey, client, cacheOperation);
return data;
}
});
}
private <T> EVCacheClient buildEvCacheClient(boolean throwExc, Call callType, CompletableFuture<T> completableFuture) {
EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, callType);
if (throwExc) completableFuture.completeExceptionally(new EVCacheException("Could not find a client to get the data APP " + _appName));
return null;
}
return client;
}
private <T> EVCacheEvent buildAndStartEvent(EVCacheClient client,
List<EVCacheKey> evcKeys,
boolean throwExc,
CompletableFuture<T> completableFuture,
Call callType) {
EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), callType);
if (event != null) {
event.setEVCacheKeys(evcKeys);
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, callType);
if (throwExc)
completableFuture.completeExceptionally(new EVCacheException("Request Throttled for app " + _appName + " & keys " + evcKeys));
return null;
}
startEvent(event);
return event;
}
return null;
}
private <T> void handleBulkFinally(StringBuilder status,
RetryCount tries,
EVCacheClient client,
StringBuilder cacheOperation,
Collection<String> keys,
Long start) {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
if (bulkKeysSize == null) {
final List<Tag> tagList = new ArrayList<Tag>(4);
tagList.addAll(tags);
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ));
bulkKeysSize = EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.OVERALL_KEYS_SIZE, tagList);
}
bulkKeysSize.record(keys.size());
getTimer(Call.COMPLETABLE_FUTURE_GET_BULK.name(),
EVCacheMetricsFactory.READ,
cacheOperation.toString(),
status.toString(),
tries.get(),
maxReadDuration.get(),
client.getServerGroup())
.record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("ASYNC GET BULK : APP "
+ _appName + " Took " + duration + " milliSec to get the value for key " + keys);
}
private <T> T handleFinally(T data,
StringBuilder status,
Integer tries,
EVCacheClient client,
StringBuilder cacheOperation,
Long start,
Call call) {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime() - start;
getTimer(call.name(),
EVCacheMetricsFactory.READ,
cacheOperation.toString(),
status.toString(),
tries,
maxReadDuration.get(),
client.getServerGroup())
.record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog())
log.debug("GET ASYNC : APP " + _appName + ", Took " + duration + " milliSec.");
return data;
}
private void handleException(Throwable ex, EVCacheEvent event) {
if (ex.getCause() instanceof RuntimeException) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Handling exception with cause ", ex.getCause());
}
Throwable runTimeCause = ex.getCause();
if (runTimeCause.getCause() instanceof ExecutionException) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Handling ExecutionException with cause ",runTimeCause.getCause());
}
Throwable executionExceptionCause = runTimeCause.getCause();
if (executionExceptionCause.getCause() instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
if (event != null) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Setting Status as Timeout");
}
event.setStatus(EVCacheMetricsFactory.TIMEOUT);
eventError(event, ex);
}
}
return;
}
}
if (event != null) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Setting event as Error");
}
event.setStatus(EVCacheMetricsFactory.ERROR);
eventError(event, ex);
}
}
private <T> void handleMissData(EVCacheEvent event, EVCacheKey evcKey, EVCacheClient client, StringBuilder cacheOperation) {
cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.NO);
if (event != null) event.setAttribute("status", "GMISS");
if (log.isInfoEnabled() && shouldLog())
log.info("GET ASYNC : APP " + _appName + " ; cache miss for key : " + evcKey);
endEvent(null, evcKey, client, event);
}
private <T> void handleData(T data, EVCacheEvent event, EVCacheKey evcKey, EVCacheClient client, StringBuilder cacheOperation) {
if (event != null) event.setAttribute("status", "GHIT");
endEvent(data, evcKey, client, event);
}
private <T> void endEvent(T data, EVCacheKey evcKey, EVCacheClient client, EVCacheEvent event) {
if (log.isDebugEnabled() && shouldLog())
log.debug("COMPLETABLE FUTURE GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
if (event != null) endEvent(event);
}
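// Recursively walks the fallback clients: tries fbClients[fbClientIndex] and, on a
// null result or an exception, moves on to the next index until a value is found
// or the list is exhausted (completing with null).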
private <T> CompletableFuture<T> handleRetries(List<EVCacheClient> fbClients,
int fbClientIndex,
EVCacheEvent event,
EVCacheKey evcKey,
Transcoder<T> tc,
RetryCount retryCount) {
if (fbClientIndex >= fbClients.size()) {
return CompletableFuture.completedFuture(null);
}
if (log.isDebugEnabled() && shouldLog()) {
log.debug("searching key in the server {}", fbClients.get(fbClientIndex).getServerGroup().getName());
}
CompletableFuture<T> future = getAsyncData(
fbClients.get(fbClientIndex),
event,
evcKey,
tc);
int nextIndex = fbClientIndex + 1;
retryCount.incr();
return future.thenApply(s -> s != null ?
handleSuccessCompletion(s, evcKey, fbClients, fbClientIndex, retryCount) :
handleRetries(fbClients, nextIndex, event, evcKey, tc, retryCount))
.exceptionally(t -> handleRetries(fbClients, nextIndex, event, evcKey, tc, retryCount))
.thenCompose(Function.identity());
}
public <T> CompletableFuture<T> handleSuccessCompletion(T s, EVCacheKey key, List<EVCacheClient> fbClients, int index, RetryCount retryCount) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("fetched the key {} from server {} and retry count {}", key.getKey(), fbClients.get(index).getServerGroup().getName(), retryCount.get());
}
return CompletableFuture.completedFuture(s);
}
private <T> CompletableFuture<T> handleRetry(T data,
EVCacheKey evcKey,
Transcoder<T> tc,
EVCacheClient client,
boolean hasZF,
boolean throwExc,
EVCacheEvent event,
RetryCount retryCount) {
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
return handleRetries(fbClients, 0, event, evcKey, tc, retryCount);
}
return CompletableFuture.completedFuture(data);
}
public EVCacheItemMetaData metaDebug(String key) throws EVCacheException {
return this.metaDebugInternal(key, false);
}
protected EVCacheItemMetaData metaDebugInternal(String key, boolean isOriginalKeyHashed) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final EVCacheKey evcKey = getEVCacheKey(key);
final boolean throwExc = doThrowException();
EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.META_DEBUG);
if (throwExc) throw new EVCacheException("Could not find a client to get the metadata for APP " + _appName);
return null; // Fast failure
}
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.META_DEBUG);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_DEBUG);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_DEBUG);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
String cacheOperation = EVCacheMetricsFactory.YES;
int tries = 1;
try {
final boolean hasZF = hasZoneFallback();
boolean throwEx = hasZF ? false : throwExc;
EVCacheItemMetaData data = getEVCacheItemMetaData(client, evcKey, throwEx, hasZF, isOriginalKeyHashed);
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
for (int i = 0; i < fbClients.size(); i++) {
final EVCacheClient fbClient = fbClients.get(i);
if(i >= fbClients.size() - 1) throwEx = throwExc;
if (event != null) {
try {
if (shouldThrottle(event)) {
status = EVCacheMetricsFactory.THROTTLED;
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
status = EVCacheMetricsFactory.THROTTLED;
return null;
}
}
tries++;
data = getEVCacheItemMetaData(fbClient, evcKey, throwEx, i < fbClients.size() - 1, isOriginalKeyHashed);
if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
if (data != null) {
client = fbClient;
break;
}
}
}
}
if (data != null) {
if (event != null) event.setAttribute("status", "MDHIT");
} else {
cacheOperation = EVCacheMetricsFactory.NO;
if (event != null) event.setAttribute("status", "MDMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("META_DEBUG : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("META_DEBUG : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
if (event != null) endEvent(event);
return data;
} catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
status = EVCacheMetricsFactory.TIMEOUT;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("CheckedOperationTimeoutException getting with meta data for APP " + _appName + ", key = " + evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName
+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("Exception getting with metadata for APP " + _appName + ", key = " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.META_DEBUG.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("META_DEBUG : APP " + _appName + ", Took " + duration + " milliSec.");
}
}
public <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc) throws EVCacheException {
return this.metaGetInternal(key, tc, false);
}
protected <T> EVCacheItem<T> metaGetInternal(String key, Transcoder<T> tc, boolean isOriginalKeyHashed) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final EVCacheKey evcKey = getEVCacheKey(key);
final boolean throwExc = doThrowException();
EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.META_GET);
if (throwExc) throw new EVCacheException("Could not find a client to get the data APP " + _appName);
return null; // Fast failure
}
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.META_GET);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_GET);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.META_GET);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
String cacheOperation = EVCacheMetricsFactory.YES;
int tries = 1;
try {
final boolean hasZF = hasZoneFallback();
boolean throwEx = hasZF ? false : throwExc;
EVCacheItem<T> data = getEVCacheItem(client, evcKey, tc, throwEx, hasZF, isOriginalKeyHashed, true);
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
for (int i = 0; i < fbClients.size(); i++) {
final EVCacheClient fbClient = fbClients.get(i);
if(i >= fbClients.size() - 1) throwEx = throwExc;
if (event != null) {
try {
if (shouldThrottle(event)) {
status = EVCacheMetricsFactory.THROTTLED;
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
status = EVCacheMetricsFactory.THROTTLED;
return null;
}
}
tries++;
data = getEVCacheItem(fbClient, evcKey, tc, throwEx, i < fbClients.size() - 1, isOriginalKeyHashed, true);
if (log.isDebugEnabled() && shouldLog()) log.debug("Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
if (data != null) {
client = fbClient;
break;
}
}
}
}
if (data != null) {
if (event != null) event.setAttribute("status", "MGHIT");
} else {
cacheOperation = EVCacheMetricsFactory.NO;
if (event != null) event.setAttribute("status", "MGMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("META_GET : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("META_GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
if (event != null) endEvent(event);
return data;
} catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
status = EVCacheMetricsFactory.TIMEOUT;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("CheckedOperationTimeoutException getting with meta data for APP " + _appName + ", key = " + evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName
+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("Exception getting with meta data for APP " + _appName + ", key = " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.META_GET.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("META_GET : APP " + _appName + ", Took " + duration + " milliSec.");
}
}
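/**
* Maps a latch {@link Policy} to the number of replicas that must agree.
* Worked examples: with 5 server groups QUORUM -> (5/2)+1 = 3 and ALL_MINUS_1 -> 4;
* with 2 server groups QUORUM -> 2 and ALL_MINUS_1 -> 1.
*/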
private int policyToCount(Policy policy, int count) {
if (policy == null) return 0;
switch (policy) {
case NONE:
return 0;
case ONE:
return 1;
case QUORUM:
if (count == 0)
return 0;
else if (count <= 2)
return count;
else
return (count / 2) + 1;
case ALL_MINUS_1:
if (count == 0)
return 0;
else if (count <= 2)
return 1;
else
return count - 1;
default:
return count;
}
}
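/**
* Consistent read: fans the get out to every server group, returns a value only when
* at least policyToCount(policy, clients.length) replicas agree, and deletes the
* copies that disagree. Sketch (hypothetical key, assuming three server groups):
* <pre>{@code
* String v = cache.get("key1", null, Policy.QUORUM); // needs 2 of 3 matching copies
* }</pre>
*/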
public <T> T get(String key, Transcoder<T> tc, Policy policy) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
if (throwExc) throw new EVCacheException("Could not find a client to asynchronously get the data");
return null; // Fast failure
}
final int expectedSuccessCount = policyToCount(policy, clients.length);
if(expectedSuccessCount <= 1) return get(key, tc);
final long startTime = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
String cacheOperation = EVCacheMetricsFactory.YES;
int tries = 1;
try {
final List<Future<T>> futureList = new ArrayList<Future<T>>(clients.length);
final long endTime = startTime + _pool.getReadTimeout().get().intValue();
for (EVCacheClient client : clients) {
final Future<T> future = getGetFuture(client, key, tc, throwExc);
futureList.add(future);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : APP " + _appName + ", Future " + future + " for key : " + key + " with policy : " + policy + " for client : " + client);
}
final Map<T, List<EVCacheClient>> evcacheClientMap = new HashMap<T, List<EVCacheClient>>();
//final Map<T, Integer> tMap = new HashMap<T,Integer>();
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Total Requests " + clients.length + "; Expected Success Count : " + expectedSuccessCount);
for(Future<T> future : futureList) {
try {
if(future instanceof EVCacheOperationFuture) {
EVCacheOperationFuture<T> evcacheOperationFuture = (EVCacheOperationFuture<T>)future;
long duration = endTime - System.currentTimeMillis();
if(duration < 20) duration = 20;
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : block duration : " + duration);
final T t = evcacheOperationFuture.get(duration, TimeUnit.MILLISECONDS, throwExc, false);
if (log.isTraceEnabled() && shouldLog()) log.trace("GET : CONSISTENT : value : " + t);
if(t != null) {
final List<EVCacheClient> cList = evcacheClientMap.computeIfAbsent(t, k -> new ArrayList<EVCacheClient>(clients.length));
cList.add(evcacheOperationFuture.getEVCacheClient());
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Added Client to ArrayList " + cList);
}
}
} catch (Exception e) {
log.error("Exception",e);
}
}
T retVal = null;
/* TODO : use metaget to get TTL and set it. For now we will delete the inconsistent value */
for(Entry<T, List<EVCacheClient>> entry : evcacheClientMap.entrySet()) {
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Existing Count for Value : " + entry.getValue().size() + "; expectedSuccessCount : " + expectedSuccessCount);
if(entry.getValue().size() >= expectedSuccessCount) {
retVal = entry.getKey();
} else {
for(EVCacheClient client : entry.getValue()) {
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : Delete in-consistent vale from : " + client);
client.delete(key);
}
}
}
if(retVal != null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : policy : " + policy + " was met. Will return the value. Total Duration : " + (System.currentTimeMillis() - startTime) + " milli Seconds.");
return retVal;
}
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : policy : " + policy + " was NOT met. Will return NULL. Total Duration : " + (System.currentTimeMillis() - startTime) + " milli Seconds.");
return null;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (!throwExc) return null;
throw new EVCacheException("Exception getting data for APP " + _appName + ", key = " + key, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- startTime;
getTimer(Call.GET_ALL.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : CONSISTENT : APP " + _appName + ", Took " + duration + " milliSec.");
}
}
public <T> Single<T> get(String key, Scheduler scheduler) {
return this.get(key, (Transcoder<T>) _transcoder, scheduler);
}
public <T> Single<T> get(String key, Transcoder<T> tc, Scheduler scheduler) {
if (null == key) return Single.error(new IllegalArgumentException("Key cannot be null"));
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET);
return Single.error(new EVCacheException("Could not find a client to get the data APP " + _appName));
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET);
return Single.error(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
final boolean hasZF = hasZoneFallback();
final boolean throwEx = hasZF ? false : throwExc;
return getData(client, evcKey, tc, throwEx, hasZF, scheduler).flatMap(data -> {
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
return Observable.concat(Observable.from(fbClients).map(
fbClient -> getData(fbClients.indexOf(fbClient), fbClients.size(), fbClient, evcKey, tc, throwEx, throwExc, false, scheduler) //TODO : for the last one make sure to pass throwExc
//.doOnSuccess(fbData -> increment("RETRY_" + ((fbData == null) ? "MISS" : "HIT")))
.toObservable()))
.firstOrDefault(null, fbData -> (fbData != null)).toSingle();
}
}
return Single.just(data);
}).map(data -> {
//increment("GetCall");
if (data != null) {
//increment("GetHit");
if (event != null) event.setAttribute("status", "GHIT");
} else {
//increment("GetMiss");
if (event != null) event.setAttribute("status", "GMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("GET : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client .getServerGroup());
if (event != null) endEvent(event);
return data;
}).onErrorReturn(ex -> {
if (ex instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.TIMEOUT);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("CheckedOperationTimeoutException getting data for APP " + _appName + ", key = "
+ evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName
+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex));
} else {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.ERROR);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("Exception getting data for APP " + _appName + ", key = " + evcKey, ex));
}
}).doAfterTerminate(() -> {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.GET.name(), EVCacheMetricsFactory.READ, null, EVCacheMetricsFactory.SUCCESS, 1, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET : APP " + _appName + ", Took " + duration + " milliSec.");
});
}
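/*
 * Synchronous single-replica read. When key hashing is enabled, the stored
 * EVCacheValue wrapper is fetched and its embedded canonical key is compared
 * to the requested key; a mismatch is recorded as a hash collision and
 * treated as a miss. Exceptions are swallowed (null is returned) when the
 * caller suppresses exceptions or a zone fallback is available.
 */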
private <T> T getData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF) throws Exception {
if (client == null) return null;
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
try {
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
if(hashKey != null) {
final Object obj = client.get(hashKey, evcacheValueTranscoder, throwException, hasZF);
if(obj != null && obj instanceof EVCacheValue) {
final EVCacheValue val = (EVCacheValue)obj;
if(!val.getKey().equals(canonicalKey)) {
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.GET.name(), EVCacheMetricsFactory.READ);
return null;
}
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
return transcoder.decode(cd);
} else {
return null;
}
} else {
return client.get(canonicalKey, transcoder, throwException, hasZF);
}
} catch (EVCacheConnectException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (EVCacheReadQueueException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (EVCacheException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey, ex);
if (!throwException || hasZF) return null;
throw ex;
}
}
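/*
 * Async read entry point that applies event throttling first; a throttled
 * request surfaces as an exceptionally completed future.
 */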
private <T> CompletableFuture<T> getAsyncData(EVCacheClient client,
EVCacheEvent event,
EVCacheKey key,
Transcoder<T> tc) {
if (event != null) {
if (shouldThrottle(event)) {
CompletableFuture<T> completableFuture = new CompletableFuture<>();
completableFuture.completeExceptionally(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
return completableFuture;
}
}
return getAsyncData(client, key, tc);
}
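/*
 * CompletableFuture-based single-replica read. Hashed keys are fetched
 * through the EVCacheValue transcoder so the embedded canonical key can be
 * verified (see getData(Transcoder, String, Object)) before decoding.
 */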
private <T> CompletableFuture<T> getAsyncData(EVCacheClient client,
EVCacheKey evcKey,
Transcoder<T> tc) {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
if (hashKey != null) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Fetching data with hashKey {} ", hashKey);
}
return client.getAsync(hashKey, evcacheValueTranscoder)
.thenApply(val -> getData(transcoder, canonicalKey, val))
.exceptionally(ex -> handleClientException(hashKey, ex));
} else {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("Fetching data with canonicalKey {} ", canonicalKey);
}
return client.getAsync(canonicalKey, transcoder)
.exceptionally(ex -> handleClientException(canonicalKey, ex));
}
}
private <T> T handleClientException(String evcKey, Throwable ex) {
if (log.isDebugEnabled() && shouldLog())
log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey + ":" + ex);
throw sneakyThrow(ex);
}
private <T> T getData(Transcoder<T> transcoder, String canonicalKey, Object obj) {
if (obj instanceof EVCacheValue) {
final EVCacheValue val = (EVCacheValue) obj;
if (!val.getKey().equals(canonicalKey)) {
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.GET.name(), EVCacheMetricsFactory.READ);
return null;
}
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
return transcoder.decode(cd);
} else {
return null;
}
}
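/*
 * Issues a meta debug command against the derived (possibly hashed) key to
 * fetch item metadata without reading the value. Returns null on failure when
 * exceptions are suppressed or a zone fallback exists.
 */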
protected EVCacheItemMetaData getEVCacheItemMetaData(EVCacheClient client, EVCacheKey evcKey, boolean throwException, boolean hasZF, boolean isOriginalKeyHashed) throws Exception {
if (client == null) return null;
try {
return client.metaDebug(isOriginalKeyHashed ? evcKey.getKey() : evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()));
} catch (EVCacheConnectException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (EVCacheReadQueueException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (EVCacheException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting with metadata for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting with metadata for APP " + _appName + ", key : " + evcKey, ex);
if (!throwException || hasZF) return null;
throw ex;
}
}
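/*
 * meta-get variant that returns the value together with its item metadata.
 * For hashed keys the stored EVCacheValue is optionally deserialized and its
 * embedded canonical key verified (skipped when the caller passed an already
 * hashed key), mirroring the collision handling in getData(...).
 */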
protected <T> EVCacheItem<T> getEVCacheItem(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF, boolean isOriginalKeyHashed, boolean desearilizeEVCacheValue) throws Exception {
if (client == null) return null;
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
try {
String hashKey = isOriginalKeyHashed ? evcKey.getKey() : evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
if (hashKey != null) {
if(deserializeEVCacheValue) {
final EVCacheItem<Object> obj = client.metaGet(hashKey, evcacheValueTranscoder, throwException, hasZF);
if (null == obj) return null;
if (obj.getData() instanceof EVCacheValue) {
final EVCacheValue val = (EVCacheValue) obj.getData();
if (null == val) {
return null;
}
// compare the key embedded in the value to the original key only if the original key is not passed hashed
if (!isOriginalKeyHashed && !(val.getKey().equals(canonicalKey))) {
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.META_GET.name(), EVCacheMetricsFactory.META_GET_OPERATION);
return null;
}
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
T t = transcoder.decode(cd);
obj.setData(t);
obj.setFlag(val.getFlags());
return (EVCacheItem<T>) obj;
} else {
return null;
}
} else {
final EVCacheItem<CachedData> obj = client.metaGet(hashKey, new ChunkTranscoder(), throwException, hasZF);
if (null == obj) return null;
return (EVCacheItem<T>) obj;
}
} else {
return client.metaGet(canonicalKey, transcoder, throwException, hasZF);
}
} catch (EVCacheConnectException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheConnectException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (EVCacheReadQueueException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (EVCacheException ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting with meta data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw ex;
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting with meta data for APP " + _appName + ", key : " + evcKey, ex);
if (!throwException || hasZF) return null;
throw ex;
}
}
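/*
 * Fallback-aware wrapper: only the last fallback client may propagate
 * exceptions (throwExc); earlier attempts fail soft so the next replica can
 * be tried.
 */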
private <T> Single<T> getData(int index, int size, EVCacheClient client, EVCacheKey canonicalKey, Transcoder<T> tc, boolean throwEx, boolean throwExc, boolean hasZF, Scheduler scheduler) {
if(index >= size -1) throwEx = throwExc;
return getData(client, canonicalKey, tc, throwEx, hasZF, scheduler);
}
private <T> Single<T> getData(EVCacheClient client, EVCacheKey evcKey, Transcoder<T> tc, boolean throwException, boolean hasZF, Scheduler scheduler) {
if (client == null) return Single.error(new IllegalArgumentException("Client cannot be null"));
if(evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
return Single.error(new IllegalArgumentException("Not supported"));
} else {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
return client.get(evcKey.getCanonicalKey(client.isDuetClient()), transcoder, throwException, hasZF, scheduler).onErrorReturn(ex -> {
if (ex instanceof EVCacheReadQueueException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheReadQueueException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw sneakyThrow(ex);
} else if (ex instanceof EVCacheException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("EVCacheException while getting data for APP " + _appName + ", key : " + evcKey + "; hasZF : " + hasZF, ex);
if (!throwException || hasZF) return null;
throw sneakyThrow(ex);
} else {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getting data for APP " + _appName + ", key : " + evcKey, ex);
if (!throwException || hasZF) return null;
throw sneakyThrow(ex);
}
});
}
}
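// 30 days in seconds; memcached treats a TTL above this as an absolute epoch timestamp rather than a relative duration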
private static final int MAX_IN_SEC = 2592000;
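/**
 * Validates a TTL before issuing a call: it must be non-negative, expressed
 * in seconds rather than milliseconds, and, when given as an epoch timestamp
 * (any value above MAX_IN_SEC), it must lie in the future. Violations are
 * counted as INVALID_TTL fast failures and rethrown.
 */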
private void checkTTL(int timeToLive, Call call) throws IllegalArgumentException {
try {
if(timeToLive < 0) throw new IllegalArgumentException ("Time to Live ( " + timeToLive + ") must be great than or equal to 0.");
final long currentTimeInMillis = System.currentTimeMillis();
if(timeToLive > currentTimeInMillis) throw new IllegalArgumentException ("Time to Live ( " + timeToLive + ") must be in seconds.");
if(timeToLive > MAX_IN_SEC && timeToLive < currentTimeInMillis/1000) throw new IllegalArgumentException ("If providing Time to Live ( " + timeToLive + ") in seconds as epoc value, it should be greater than current time " + currentTimeInMillis/1000);
} catch (IllegalArgumentException iae) {
incrementFastFail(EVCacheMetricsFactory.INVALID_TTL, call);
throw iae;
}
}
public <T> T getAndTouch(String key, int timeToLive) throws EVCacheException {
return this.getAndTouch(key, timeToLive, (Transcoder<T>) _transcoder);
}
public <T> Single<T> getAndTouch(String key, int timeToLive, Scheduler scheduler) {
return this.getAndTouch(key, timeToLive, (Transcoder<T>) _transcoder, scheduler);
}
public <T> Single<T> getAndTouch(String key, int timeToLive, Transcoder<T> tc, Scheduler scheduler) {
if (null == key) return Single.error(new IllegalArgumentException("Key cannot be null"));
checkTTL(timeToLive, Call.GET_AND_TOUCH);
if(hashKey.get()) {
return Single.error(new IllegalArgumentException("Not supported"));
}
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET_AND_TOUCH);
return Single.error(new EVCacheException("Could not find a client to get and touch the data for APP " + _appName));
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET_AND_TOUCH);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
return Single.error(new EVCacheException("Request Throttled for app " + _appName + " & key " + key));
}
event.setTTL(timeToLive);
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
final boolean hasZF = hasZoneFallback();
final boolean throwEx = hasZF ? false : throwExc;
// we have to touch all copies anyway, so reuse getData instead of getAndTouch
return getData(client, evcKey, tc, throwEx, hasZF, scheduler).flatMap(data -> {
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
return Observable.concat(Observable.from(fbClients).map(
//TODO : for the last one make sure to pass throwExc
fbClient -> getData(fbClients.indexOf(fbClient), fbClients.size(), fbClient, evcKey, tc, throwEx, throwExc, false, scheduler)
.doOnSuccess(fbData -> {
//increment("RETRY_" + ((fbData == null) ? "MISS" : "HIT"));
})
.toObservable()))
.firstOrDefault(null, fbData -> (fbData != null)).toSingle();
}
}
return Single.just(data);
}).map(data -> {
//increment("GetCall");
if (data != null) {
//increment("GetHit");
if (event != null) event.setAttribute("status", "THIT");
// touch all copies
try {
touchData(evcKey, timeToLive);
} catch (Exception e) {
throw sneakyThrow(new EVCacheException("Exception performing touch for APP " + _appName + ", key = " + evcKey, e));
}
if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client .getServerGroup());
} else {
//increment("GetMiss");
if (event != null) event.setAttribute("status", "TMISS");
if (log.isInfoEnabled() && shouldLog()) log.info("GET_AND_TOUCH : APP " + _appName + " ; cache miss for key : " + evcKey);
}
if (event != null) endEvent(event);
return data;
}).onErrorReturn(ex -> {
if (ex instanceof net.spy.memcached.internal.CheckedOperationTimeoutException) {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.TIMEOUT);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key = " + evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName + ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex));
} else {
if (event != null) {
event.setStatus(EVCacheMetricsFactory.ERROR);
eventError(event, ex);
}
if (!throwExc) return null;
throw sneakyThrow(new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex));
}
}).doAfterTerminate(() -> {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, null, EVCacheMetricsFactory.SUCCESS, 1, maxReadDuration.get().intValue(),client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", Took " + duration+ " milliSec.");
});
}
@Override
public <T> T getAndTouch(String key, int timeToLive, Transcoder<T> tc) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
checkTTL(timeToLive, Call.GET_AND_TOUCH);
final EVCacheKey evcKey = getEVCacheKey(key);
if (_useInMemoryCache.get()) {
final boolean throwExc = doThrowException();
T value = null;
try {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
value = (T) getInMemoryCache(transcoder).get(evcKey);
} catch (ExecutionException e) {
if(throwExc) {
if(e.getCause() instanceof DataNotFoundException) {
return null;
}
if(e.getCause() instanceof EVCacheException) {
if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
throw (EVCacheException)e.getCause();
}
throw new EVCacheException("ExecutionException", e);
}
}
if (value != null) {
try {
touchData(evcKey, timeToLive);
} catch (Exception e) {
if (throwExc) throw new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, e);
}
return value;
}
}
if(ignoreTouch.get()) {
return doGet(evcKey, tc);
} else {
return doGetAndTouch(evcKey, timeToLive, tc);
}
}
<T> T doGetAndTouch(EVCacheKey evcKey, int timeToLive, Transcoder<T> tc) throws EVCacheException {
final boolean throwExc = doThrowException();
EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.GET_AND_TOUCH);
if (throwExc) throw new EVCacheException("Could not find a client to get and touch the data for App " + _appName);
return null; // Fast failure
}
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.GET_AND_TOUCH);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.GET_AND_TOUCH);
return null;
}
event.setTTL(timeToLive);
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String cacheOperation = EVCacheMetricsFactory.YES;
int tries = 1;
String status = EVCacheMetricsFactory.SUCCESS;
try {
final boolean hasZF = hasZoneFallback();
boolean throwEx = hasZF ? false : throwExc;
T data = getData(client, evcKey, tc, throwEx, hasZF);
if (data == null && hasZF) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
for (int i = 0; i < fbClients.size(); i++) {
final EVCacheClient fbClient = fbClients.get(i);
if(i >= fbClients.size() - 1) throwEx = throwExc;
if (event != null) {
try {
if (shouldThrottle(event)) {
status = EVCacheMetricsFactory.THROTTLED;
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKey);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
status = EVCacheMetricsFactory.THROTTLED;
return null;
}
}
tries++;
data = getData(fbClient, evcKey, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
if (log.isDebugEnabled() && shouldLog()) log.debug("GetAndTouch Retry for APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + fbClient.getServerGroup());
if (data != null) {
client = fbClient;
break;
}
}
}
if (data != null) {
if (event != null) event.setAttribute("status", "THIT");
// touch all copies
touchData(evcKey, timeToLive);
if (log.isDebugEnabled() && shouldLog()) log.debug("GET_AND_TOUCH : APP " + _appName + ", key [" + evcKey + (log.isTraceEnabled() ? "], Value [" + data : "") + "], ServerGroup : " + client.getServerGroup());
} else {
cacheOperation = EVCacheMetricsFactory.NO;
if (log.isInfoEnabled() && shouldLog()) log.info("GET_AND_TOUCH : APP " + _appName + " ; cache miss for key : " + evcKey);
if (event != null) event.setAttribute("status", "TMISS");
}
if (event != null) endEvent(event);
return data;
} catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
status = EVCacheMetricsFactory.TIMEOUT;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key : " + evcKey, ex);
if (!throwExc) return null;
throw new EVCacheException("CheckedOperationTimeoutException executing getAndTouch APP " + _appName + ", key = " + evcKey
+ ".\nYou can set the following property to increase the timeout " + _appName+ ".EVCacheClientPool.readTimeout=<timeout in milli-seconds>", ex);
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("Exception executing getAndTouch APP " + _appName + ", key = " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.GET_AND_TOUCH.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("Took " + duration + " milliSec to get&Touch the value for APP " + _appName + ", key " + evcKey);
}
}
@Override
public Future<Boolean>[] touch(String key, int timeToLive) throws EVCacheException {
checkTTL(timeToLive, Call.TOUCH);
final EVCacheLatch latch = this.touch(key, timeToLive, null);
if (latch == null) return new EVCacheFuture[0];
final List<Future<Boolean>> futures = latch.getAllFutures();
if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
for (int i = 0; i < futures.size(); i++) {
final Future<Boolean> future = futures.get(i);
if (future instanceof EVCacheFuture) {
eFutures[i] = (EVCacheFuture) future;
} else if (future instanceof EVCacheOperationFuture) {
final EVCacheOperationFuture<Boolean> evfuture = (EVCacheOperationFuture<Boolean>)future;
eFutures[i] = new EVCacheFuture(future, key, _appName, evfuture.getServerGroup(), evfuture.getEVCacheClient());
} else {
eFutures[i] = new EVCacheFuture(future, key, _appName, null);
}
}
return eFutures;
}
public <T> EVCacheLatch touch(String key, int timeToLive, Policy policy) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key cannot be null");
checkTTL(timeToLive, Call.TOUCH);
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.TOUCH);
if (throwExc) throw new EVCacheException("Could not find a client to set the data");
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.TOUCH);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.TOUCH);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.TOUCH);
return null;
}
startEvent(event);
}
String status = EVCacheMetricsFactory.SUCCESS;
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
try {
final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
touchData(evcKey, timeToLive, clients, latch);
if (event != null) {
event.setTTL(timeToLive);
if(_eventsUsingLatchFP.get()) {
latch.setEVCacheEvent(event);
latch.scheduledFutureValidation();
} else {
endEvent(event);
}
}
return latch;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception touching the data for APP " + _appName + ", key : " + evcKey, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTTLDistributionSummary(Call.TOUCH.name(), EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.TTL).record(timeToLive);
getTimer(Call.TOUCH.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("TOUCH : APP " + _appName + " for key : " + evcKey + " with timeToLive : " + timeToLive);
}
}
private void touchData(EVCacheKey evcKey, int timeToLive) throws Exception {
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
touchData(evcKey, timeToLive, clients);
}
private void touchData(EVCacheKey evcKey, int timeToLive, EVCacheClient[] clients) throws Exception {
touchData(evcKey, timeToLive, clients, null);
}
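/*
 * Fans the touch out to every replica: the new TTL is applied on each write
 * client using that client's derived (possibly hashed) key, optionally
 * tracked through the supplied latch.
 */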
private void touchData(EVCacheKey evcKey, int timeToLive, EVCacheClient[] clients, EVCacheLatch latch ) throws Exception {
checkTTL(timeToLive, Call.TOUCH);
for (EVCacheClient client : clients) {
client.touch(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), timeToLive, latch);
}
}
public <T> Future<T> getAsynchronous(String key) throws EVCacheException {
return this.getAsynchronous(key, (Transcoder<T>) _transcoder);
}
@Override
public <T> Future<T> getAsynchronous(final String key, final Transcoder<T> tc) throws EVCacheException {
if (null == key) throw new IllegalArgumentException("Key is null.");
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.ASYNC_GET);
if (throwExc) throw new EVCacheException("Could not find a client to asynchronously get the data");
return null; // Fast failure
}
return getGetFuture(client, key, tc, throwExc);
}
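/*
 * Builds the async-get future for a single client. For hashed keys the
 * returned Future unwraps the stored EVCacheValue on get(), verifying the
 * embedded canonical key and decoding with the effective transcoder.
 */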
private <T> Future<T> getGetFuture(final EVCacheClient client, final String key, final Transcoder<T> tc, final boolean throwExc) throws EVCacheException {
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.ASYNC_GET);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ASYNC_GET);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ASYNC_GET);
return null;
}
startEvent(event);
}
String status = EVCacheMetricsFactory.SUCCESS;
final Future<T> r;
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
try {
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
if(hashKey != null) {
final Future<Object> objFuture = client.asyncGet(hashKey, evcacheValueTranscoder, throwExc, false);
r = new Future<T> () {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return objFuture.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return objFuture.isCancelled();
}
@Override
public boolean isDone() {
return objFuture.isDone();
}
@Override
public T get() throws InterruptedException, ExecutionException {
return getFromObj(objFuture.get());
}
private T getFromObj(Object obj) {
if(obj != null && obj instanceof EVCacheValue) {
final EVCacheValue val = (EVCacheValue)obj;
if(!val.getKey().equals(canonicalKey)) {
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.ASYNC_GET.name(), EVCacheMetricsFactory.READ);
return null;
}
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
return transcoder.decode(cd);
} else {
return null;
}
}
@Override
public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
return getFromObj(objFuture.get(timeout, unit));
}
};
} else {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) client.getTranscoder() : (Transcoder<T>) _transcoder) : tc;
r = client.asyncGet(canonicalKey, transcoder, throwExc, false);
}
if (event != null) endEvent(event);
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug( "Exception while getting data for keys Asynchronously APP " + _appName + ", key : " + key, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("Exception getting data for APP " + _appName + ", key : " + key, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.ASYNC_GET.name(), EVCacheMetricsFactory.READ, null, status, 1, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("Took " + duration + " milliSec to execute AsyncGet the value for APP " + _appName + ", key " + key);
}
return r;
}
private <T> CompletableFuture<Map<EVCacheKey, T>> getAsyncBulkData(EVCacheClient client,
EVCacheEvent event,
List<EVCacheKey> keys,
Transcoder<T> tc) {
if (event != null) {
if (shouldThrottle(event)) {
throw sneakyThrow(new EVCacheException("Request Throttled for app " + _appName + " & key " + keys));
}
}
return getAsyncBulkData(client, keys, tc);
}
private <T> CompletableFuture<Map<EVCacheKey, T>> getAsyncBulkData(EVCacheClient client,
List<EVCacheKey> evcacheKeys,
Transcoder<T> tc) {
KeyMapDto keyMapDto = buildKeyMap(client, evcacheKeys);
final Map<String, EVCacheKey> keyMap = keyMapDto.getKeyMap();
boolean hasHashedKey = keyMapDto.isKeyHashed();
if (hasHashedKey) {
if (log.isDebugEnabled() && shouldLog()) {
log.debug("fetching bulk data with hashedKey {} ",evcacheKeys);
}
return client.getAsyncBulk(keyMap.keySet(), evcacheValueTranscoder)
.thenApply(data -> buildHashedKeyValueResult(data, tc, client, keyMap))
.exceptionally(t -> handleBulkException(t, evcacheKeys));
} else {
final Transcoder<T> tcCopy;
if (tc == null && _transcoder != null) {
tcCopy = (Transcoder<T>) _transcoder;
} else {
tcCopy = tc;
}
if (log.isDebugEnabled() && shouldLog()) {
log.debug("fetching bulk data with non hashedKey {} ",keyMap.keySet());
}
return client.getAsyncBulk(keyMap.keySet(), tcCopy )
.thenApply(data -> buildNonHashedKeyValueResult(data, keyMap))
.exceptionally(t -> handleBulkException(t, evcacheKeys));
}
}
private <T> Map<EVCacheKey, T> handleBulkException(Throwable t, Collection<EVCacheKey> evCacheKeys) {
if (log.isDebugEnabled() && shouldLog())
log.debug("Exception while getBulk data for APP " + _appName + ", key : " + evCacheKeys, t);
throw Sneaky.sneakyThrow(t);
}
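/*
 * Maps each memcached-level key (hashed when hashing is enabled for the
 * client) back to its EVCacheKey and records whether any key was hashed so
 * the bulk path can pick the matching transcoder.
 */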
private KeyMapDto buildKeyMap(EVCacheClient client, Collection<EVCacheKey> evcacheKeys) {
boolean hasHashedKey = false;
final Map<String, EVCacheKey> keyMap = new HashMap<String, EVCacheKey>(evcacheKeys.size() * 2);
for (EVCacheKey evcKey : evcacheKeys) {
String key = evcKey.getCanonicalKey(client.isDuetClient());
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
if (hashKey != null) {
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + key + "], has been hashed [" + hashKey + "]");
key = hashKey;
hasHashedKey = true;
}
keyMap.put(key, evcKey);
}
return new KeyMapDto(keyMap, hasHashedKey);
}
private <T> Map<EVCacheKey, T> buildNonHashedKeyValueResult(Map<String, T> objMap,
Map<String, EVCacheKey> keyMap) {
final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
for (Map.Entry<String, T> i : objMap.entrySet()) {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, i.getValue());
}
return retMap;
}
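/*
 * Decodes bulk results fetched as EVCacheValue wrappers: entries whose
 * embedded canonical key does not match the requested key are dropped and
 * counted as hash collisions; plain (non-wrapped) values are returned as-is.
 */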
private <T> Map<EVCacheKey, T> buildHashedKeyValueResult(Map<String, Object> objMap,
Transcoder<T> tc,
EVCacheClient client,
Map<String, EVCacheKey> keyMap) {
final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
for (Map.Entry<String, Object> i : objMap.entrySet()) {
final Object obj = i.getValue();
if (obj instanceof EVCacheValue) {
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", The value for key [" + i.getKey() + "] is EVCache Value");
final EVCacheValue val = (EVCacheValue) obj;
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
final T tVal;
if (tc == null) {
tVal = (T) client.getTranscoder().decode(cd);
} else {
tVal = tc.decode(cd);
}
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (evcKey.getCanonicalKey(client.isDuetClient()).equals(val.getKey())) {
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, tVal);
} else {
if (log.isDebugEnabled() && shouldLog())
log.debug("CACHE COLLISION : APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.COMPLETABLE_FUTURE_GET_BULK.name(), EVCacheMetricsFactory.READ);
}
} else {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog())
log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, (T) obj);
}
}
return retMap;
}
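/*
 * Synchronous bulk read against a single client. Mirrors the async path:
 * hashed keys are fetched as EVCacheValue wrappers and verified, while
 * non-hashed keys are decoded directly with the effective transcoder.
 */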
private <T> Map<EVCacheKey, T> getBulkData(EVCacheClient client, Collection<EVCacheKey> evcacheKeys, Transcoder<T> tc, boolean throwException, boolean hasZF) throws Exception {
try {
boolean hasHashedKey = false;
final Map<String, EVCacheKey> keyMap = new HashMap<>(evcacheKeys.size() * 2);
for(EVCacheKey evcKey : evcacheKeys) {
String key = evcKey.getCanonicalKey(client.isDuetClient());
String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
if(hashKey != null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + key + "], has been hashed [" + hashKey + "]");
key = hashKey;
hasHashedKey = true;
}
keyMap.put(key, evcKey);
}
if(hasHashedKey) {
final Map<String, Object> objMap = client.getBulk(keyMap.keySet(), evcacheValueTranscoder, throwException, hasZF);
final Map<EVCacheKey, T> retMap = new HashMap<>((int) (objMap.size() / 0.75) + 1);
for (Map.Entry<String, Object> i : objMap.entrySet()) {
final Object obj = i.getValue();
if(obj instanceof EVCacheValue) {
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", The value for key [" + i.getKey() + "] is EVCache Value");
final EVCacheValue val = (EVCacheValue)obj;
final CachedData cd = new CachedData(val.getFlags(), val.getValue(), CachedData.MAX_SIZE);
final T tVal;
if(tc == null) {
tVal = (T)client.getTranscoder().decode(cd);
} else {
tVal = tc.decode(cd);
}
final EVCacheKey evcKey = keyMap.get(i.getKey());
if(evcKey.getCanonicalKey(client.isDuetClient()).equals(val.getKey())) {
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, tVal);
} else {
if (log.isDebugEnabled() && shouldLog()) log.debug("CACHE COLLISION : APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
incrementFailure(EVCacheMetricsFactory.KEY_HASH_COLLISION, Call.BULK.name(), EVCacheMetricsFactory.READ);
}
} else {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, (T)obj);
}
}
return retMap;
} else {
if(tc == null && _transcoder != null) tc = (Transcoder<T>)_transcoder;
final Map<String, T> objMap = client.getBulk(keyMap.keySet(), tc, throwException, hasZF);
final Map<EVCacheKey, T> retMap = new HashMap<EVCacheKey, T>((int)(objMap.size()/0.75) + 1);
for (Map.Entry<String, T> i : objMap.entrySet()) {
final EVCacheKey evcKey = keyMap.get(i.getKey());
if (log.isDebugEnabled() && shouldLog()) log.debug("APP " + _appName + ", key [" + i.getKey() + "] EVCacheKey " + evcKey);
retMap.put(evcKey, i.getValue());
}
return retMap;
}
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while getBulk data for APP " + _appName + ", key : " + evcacheKeys, ex);
if (!throwException || hasZF) return null;
throw ex;
}
}
public <T> Map<String, T> getBulk(Collection<String> keys, Transcoder<T> tc) throws EVCacheException {
return getBulk(keys, tc, false, 0);
}
public <T> Map<String, T> getBulkAndTouch(Collection<String> keys, Transcoder<T> tc, int timeToLive)
throws EVCacheException {
return getBulk(keys, tc, true, timeToLive);
}
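/*
 * Core bulk read: serves what it can from the optional in-memory cache, reads
 * the remainder from one replica, retries other server groups on a full miss
 * (and, when enabled, for partially missing keys), and optionally touches
 * every hit with the supplied TTL. Missing keys are returned with null values
 * once a fallback has been attempted.
 */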
private <T> Map<String, T> getBulk(final Collection<String> keys, Transcoder<T> tc, boolean touch, int timeToLive) throws EVCacheException {
if (null == keys) throw new IllegalArgumentException("Keys cannot be null");
if (keys.isEmpty()) return Collections.<String, T> emptyMap();
checkTTL(timeToLive, Call.BULK);
final boolean throwExc = doThrowException();
final EVCacheClient client = _pool.getEVCacheClientForRead();
if (client == null) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.BULK);
if (throwExc) throw new EVCacheException("Could not find a client to get the data in bulk");
return Collections.<String, T> emptyMap();// Fast failure
}
final Map<String, T> decanonicalR = new HashMap<String, T>((keys.size() * 4) / 3 + 1);
final Collection<EVCacheKey> evcKeys = new ArrayList<EVCacheKey>();
/* Canonicalize keys and perform fast failure checking */
for (String k : keys) {
final EVCacheKey evcKey = getEVCacheKey(k);
T value = null;
if (_useInMemoryCache.get()) {
try {
final Transcoder<T> transcoder = (tc == null) ? ((_transcoder == null) ? (Transcoder<T>) _pool.getEVCacheClientForRead().getTranscoder() : (Transcoder<T>) _transcoder) : tc;
value = (T) getInMemoryCache(transcoder).get(evcKey);
if (value == null && log.isInfoEnabled() && shouldLog()) log.info("Value not found in in-memory cache for APP " + _appName + ", key : " + evcKey);
} catch (ExecutionException e) {
if (log.isDebugEnabled() && shouldLog()) log.debug("ExecutionException while getting data from InMemory Cache", e);
throw new EVCacheException("ExecutionException", e);
}
}
if(value == null) {
evcKeys.add(evcKey);
} else {
decanonicalR.put(evcKey.getKey(), value);
if (log.isDebugEnabled() && shouldLog()) log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : " + evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
}
}
if(evcKeys.size() == 0 && decanonicalR.size() == keys.size()) {
if (log.isDebugEnabled() && shouldLog()) log.debug("All Values retrieved from inmemory cache for APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
return decanonicalR;
}
final EVCacheEvent event = createEVCacheEvent(Collections.singletonList(client), Call.BULK);
if (event != null) {
event.setEVCacheKeys(evcKeys);
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.BULK);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & keys " + keys);
return Collections.<String, T> emptyMap();
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.BULK);
return null;
}
event.setTTL(timeToLive);
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String cacheOperation = EVCacheMetricsFactory.YES;
int tries = 1;
String status = EVCacheMetricsFactory.SUCCESS;
try {
final boolean hasZF = hasZoneFallbackForBulk();
boolean throwEx = hasZF ? false : throwExc;
Map<EVCacheKey, T> retMap = getBulkData(client, evcKeys, tc, throwEx, hasZF);
List<EVCacheClient> fbClients = null;
if (hasZF) {
if (retMap == null || retMap.isEmpty()) {
fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
for (int i = 0; i < fbClients.size(); i++) {
final EVCacheClient fbClient = fbClients.get(i);
if(i >= fbClients.size() - 1) throwEx = throwExc;
if (event != null) {
try {
if (shouldThrottle(event)) {
status = EVCacheMetricsFactory.THROTTLED;
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + evcKeys);
return null;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
status = EVCacheMetricsFactory.THROTTLED;
return null;
}
}
tries++;
retMap = getBulkData(fbClient, evcKeys, tc, throwEx, (i < fbClients.size() - 1) ? true : false);
if (log.isDebugEnabled() && shouldLog()) log.debug("Fallback for APP " + _appName + ", key [" + evcKeys + (log.isTraceEnabled() ? "], Value [" + retMap : "") + "], zone : " + fbClient.getZone());
if (retMap != null && !retMap.isEmpty()) break;
}
//increment("BULK-FULL_RETRY-" + ((retMap == null || retMap.isEmpty()) ? "MISS" : "HIT"));
}
} else if (retMap != null && keys.size() > retMap.size() && _bulkPartialZoneFallbackFP.get()) {
final int initRetrySize = keys.size() - retMap.size();
List<EVCacheKey> retryEVCacheKeys = new ArrayList<EVCacheKey>(initRetrySize);
for (Iterator<EVCacheKey> keysItr = evcKeys.iterator(); keysItr.hasNext();) {
final EVCacheKey key = keysItr.next();
if (!retMap.containsKey(key)) {
retryEVCacheKeys.add(key);
}
}
fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (fbClients != null && !fbClients.isEmpty()) {
for (int ind = 0; ind < fbClients.size(); ind++) {
final EVCacheClient fbClient = fbClients.get(ind);
if (event != null) {
try {
if (shouldThrottle(event)) {
status = EVCacheMetricsFactory.THROTTLED;
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & keys " + retryEVCacheKeys);
return null;
}
} catch(EVCacheException ex) {
status = EVCacheMetricsFactory.THROTTLED;
if(throwExc) throw ex;
return null;
}
}
tries++;
final Map<EVCacheKey, T> fbRetMap = getBulkData(fbClient, retryEVCacheKeys, tc, false, hasZF);
if (log.isDebugEnabled() && shouldLog()) log.debug("Fallback for APP " + _appName + ", key [" + retryEVCacheKeys + "], Fallback Server Group : " + fbClient .getServerGroup().getName());
for (Map.Entry<EVCacheKey, T> i : fbRetMap.entrySet()) {
retMap.put(i.getKey(), i.getValue());
if (log.isDebugEnabled() && shouldLog()) log.debug("Fallback for APP " + _appName + ", key [" + i.getKey() + (log.isTraceEnabled() ? "], Value [" + i.getValue(): "]"));
}
if (retryEVCacheKeys.size() == fbRetMap.size()) break;
if (ind < fbClients.size()) {
retryEVCacheKeys = new ArrayList<EVCacheKey>(keys.size() - retMap.size());
for (Iterator<EVCacheKey> keysItr = evcKeys.iterator(); keysItr.hasNext();) {
final EVCacheKey key = keysItr.next();
if (!retMap.containsKey(key)) {
retryEVCacheKeys.add(key);
}
}
}
}
}
if (log.isDebugEnabled() && shouldLog() && retMap.size() == keys.size()) log.debug("Fallback SUCCESS for APP " + _appName + ", retMap [" + retMap + "]");
}
}
if(decanonicalR.isEmpty()) {
if (retMap == null || retMap.isEmpty()) {
if (log.isInfoEnabled() && shouldLog()) log.info("BULK : APP " + _appName + " ; Full cache miss for keys : " + keys);
if (event != null) event.setAttribute("status", "BMISS_ALL");
final Map<String, T> returnMap = new HashMap<String, T>();
if (retMap != null && retMap.isEmpty()) {
for (String k : keys) {
returnMap.put(k, null);
}
}
//increment("BulkMissFull");
cacheOperation = EVCacheMetricsFactory.NO;
/* If both Retry and first request fail Exit Immediately. */
if (event != null) endEvent(event);
return returnMap;
}
}
/* Decanonicalize the keys */
boolean partialHit = false;
final List<String> decanonicalHitKeys = new ArrayList<String>(retMap.size());
for (Iterator<EVCacheKey> itr = evcKeys.iterator(); itr.hasNext();) {
final EVCacheKey key = itr.next();
final String deCanKey = key.getKey();
final T value = retMap.get(key);
if (value != null) {
decanonicalR.put(deCanKey, value);
if (touch) touchData(key, timeToLive);
decanonicalHitKeys.add(deCanKey);
} else {
partialHit = true;
// this ensures the fallback was tried
decanonicalR.put(deCanKey, null);
}
}
if (!decanonicalR.isEmpty()) {
if (!partialHit) {
if (event != null) event.setAttribute("status", "BHIT");
} else {
if (event != null) {
event.setAttribute("status", "BHIT_PARTIAL");
event.setAttribute("BHIT_PARTIAL_KEYS", decanonicalHitKeys);
}
//increment("BulkHitPartial");
cacheOperation = EVCacheMetricsFactory.PARTIAL;
if (log.isInfoEnabled() && shouldLog()) log.info("BULK_HIT_PARTIAL for APP " + _appName + ", keys in cache [" + decanonicalR + "], all keys [" + keys + "]");
}
}
if (log.isDebugEnabled() && shouldLog()) log.debug("BulkGet; APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
if (event != null) endEvent(event);
return decanonicalR;
} catch (net.spy.memcached.internal.CheckedOperationTimeoutException ex) {
status = EVCacheMetricsFactory.TIMEOUT;
if (log.isDebugEnabled() && shouldLog()) log.debug("CheckedOperationTimeoutException getting bulk data for APP " + _appName + ", keys : " + evcKeys, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("CheckedOperationTimeoutException getting bulk data for APP " + _appName + ", keys = " + evcKeys
+ ".\nYou can set the following property to increase the timeout " + _appName + ".EVCacheClientPool.bulkReadTimeout=<timeout in milli-seconds>", ex);
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception getting bulk data for APP " + _appName + ", keys = " + evcKeys, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return null;
throw new EVCacheException("Exception getting bulk data for APP " + _appName + ", keys = " + evcKeys, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
if(bulkKeysSize == null) {
final List<Tag> tagList = new ArrayList<Tag>(4);
tagList.addAll(tags);
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, EVCacheMetricsFactory.READ));
// if(status != null) tagList.add(new BasicTag(EVCacheMetricsFactory.STATUS, status));
// if(tries >= 0) tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, String.valueOf(tries)));
bulkKeysSize = EVCacheMetricsFactory.getInstance().getDistributionSummary(EVCacheMetricsFactory.OVERALL_KEYS_SIZE, tagList);
}
bulkKeysSize.record(keys.size());
getTimer(Call.BULK.name(), EVCacheMetricsFactory.READ, cacheOperation, status, tries, maxReadDuration.get().intValue(), client.getServerGroup()).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("BULK : APP " + _appName + " Took " + duration + " milliSec to get the value for key " + evcKeys);
}
}
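/*
 * Async wrapper around the in-memory pre-check: keys resolved from the
 * in-memory cache are collected into decanonicalR while the misses are
 * returned for the network bulk get.
 */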
private <T> CompletableFuture<EVCacheBulkDataDto<T>> handleBulkInMemory(Collection<String> keys, Transcoder<T> tc) {
if (log.isDebugEnabled() && shouldLog()) log.debug("handleBulkInMemory with keys {} " + keys);
final Map<String, T> decanonicalR = new HashMap<>((keys.size() * 4) / 3 + 1);
final List<EVCacheKey> evcKeys = new ArrayList<>();
CompletableFuture<EVCacheBulkDataDto<T>> promise = new CompletableFuture<>();
try {
EVCacheBulkDataDto<T> data = handleBulkInMemory(keys, tc, decanonicalR, evcKeys);
promise.complete(data);
} catch (Exception e) {
promise.completeExceptionally(e);
}
return promise;
}
private <T> EVCacheBulkDataDto<T> handleBulkInMemory(Collection<String> keys,
Transcoder<T> tc,
Map<String, T> decanonicalR,
List<EVCacheKey> evcKeys) throws Exception {
for (String k : keys) {
final EVCacheKey evcKey = getEVCacheKey(k);
T value = getInMemory(evcKey, tc);
if (value != null) {
decanonicalR.put(evcKey.getKey(), value);
if (log.isDebugEnabled() && shouldLog())
log.debug("Value retrieved from inmemory cache for APP " + _appName + ", key : "
+ evcKey + (log.isTraceEnabled() ? "; value : " + value : ""));
} else {
if (log.isDebugEnabled() && shouldLog()) log.debug("Key not present in in memory {} " + k);
evcKeys.add(evcKey);
}
}
return new EVCacheBulkDataDto<>(decanonicalR, evcKeys);
}
public <T> CompletableFuture<Map<String, T>> getAsyncBulk(String... keys) {
return this.getAsyncBulk(Arrays.asList(keys), (Transcoder<T>) _transcoder);
}
public <T> CompletableFuture<Map<String, T>> getAsyncBulk(final Collection<String> keys, Transcoder<T> tc) {
if (null == keys) throw new IllegalArgumentException("Keys cannot be null");
if (keys.isEmpty()) return CompletableFuture.completedFuture(Collections.emptyMap());
return handleBulkInMemory(keys, tc)
.thenCompose(dto -> doAsyncGetBulk(keys, tc, dto));
}
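/*
 * Network phase of the async bulk get: short-circuits when the in-memory
 * cache satisfied every key, otherwise fans out to one client with zone
 * fallback retries and merges the results with the in-memory hits.
 */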
private <T> CompletableFuture<Map<String, T>> doAsyncGetBulk(Collection<String> keys,
Transcoder<T> tc,
EVCacheBulkDataDto<T> dto) {
// all keys handled by in memory
if(dto.getEvcKeys().size() == 0 && dto.getDecanonicalR().size() == keys.size()) {
if (log.isDebugEnabled() && shouldLog()) log.debug("All Values retrieved from in-memory cache for APP " + _appName + ", keys : " + keys);
return CompletableFuture.completedFuture(dto.getDecanonicalR());
}
final boolean throwExc = doThrowException();
CompletableFuture<Map<String, T>> errorFuture = new CompletableFuture<>();
EVCacheClient client = buildEvCacheClient(throwExc, Call.COMPLETABLE_FUTURE_GET_BULK, errorFuture);
if (errorFuture.isCompletedExceptionally() || client == null) {
if (client == null) {
if (log.isDebugEnabled() && shouldLog()) log.debug("doAsyncGetBulk : no EVCache client available");
errorFuture.complete(null);
}
return errorFuture;
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Completed Building the client for doAsyncGetBulk");
//Building the start event
EVCacheEvent event = buildAndStartEvent(client, dto.getEvcKeys(), throwExc, errorFuture, Call.COMPLETABLE_FUTURE_GET_BULK);
if (errorFuture.isCompletedExceptionally()) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Error while building and starting the event for doAsyncGetBulk");
return errorFuture;
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Cancelling the error future");
errorFuture.cancel(false);
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
StringBuilder status = new StringBuilder(EVCacheMetricsFactory.SUCCESS);
StringBuilder cacheOperation = new StringBuilder(EVCacheMetricsFactory.YES);
final boolean hasZF = hasZoneFallbackForBulk();
RetryCount retryCount = new RetryCount();
boolean throwEx = !hasZF && throwExc;
return getAsyncBulkData(client, dto.getEvcKeys(), tc)
.thenCompose(data -> handleBulkRetry(data, dto.getEvcKeys(), tc, client, event, hasZF, retryCount))
.handle((data, ex) -> {
if (ex != null) {
handleFullCacheMiss(data, event, keys, cacheOperation);
handleException(ex, event);
if (throwEx) {
throw new RuntimeException(ex);
} else {
return null;
}
} else {
Map<String, T> result = handleBulkData(dto.getDecanonicalR(),
data,
event,
keys,
dto.getEvcKeys(),
cacheOperation);
handleBulkFinally(status, retryCount, client, cacheOperation, keys, start);
return result;
}
});
}
private <T> Map<String, T> handleBulkData(Map<String, T> decanonicalR,
Map<EVCacheKey, T> retMap,
EVCacheEvent event,
Collection<String> keys,
List<EVCacheKey> evcKeys,
StringBuilder cacheOperation) {
if(retMap == null || retMap.isEmpty()) {
return handleFullCacheMiss(retMap, event, keys, cacheOperation);
}
boolean partialHit = false;
final List<String> decanonicalHitKeys = new ArrayList<>(retMap.size());
for (final EVCacheKey key : evcKeys) {
final String deCanKey = key.getKey();
final T value = retMap.get(key);
if (value != null) {
decanonicalR.put(deCanKey, value);
decanonicalHitKeys.add(deCanKey);
} else {
partialHit = true;
// this ensures the fallback was tried
decanonicalR.put(deCanKey, null);
}
}
if (!decanonicalR.isEmpty()) {
updateBulkGetEvent(decanonicalR, event, keys, partialHit, decanonicalHitKeys, cacheOperation);
}
if (log.isDebugEnabled() && shouldLog()) log.debug("Async BulkGet; APP " + _appName + ", keys : " + keys + (log.isTraceEnabled() ? "; value : " + decanonicalR : ""));
if (event != null) endEvent(event);
return decanonicalR;
}
private <T> void updateBulkGetEvent(Map<String, T> decanonicalR,
EVCacheEvent event,
Collection<String> keys,
boolean partialHit,
List<String> decanonicalHitKeys,
StringBuilder cacheOperation) {
if (!partialHit) {
if (event != null) event.setAttribute("status", "ASYNC_BHIT");
} else {
if (event != null) {
event.setAttribute("status", "ASYNC_BHIT_PARTIAL");
event.setAttribute("ASYNC_BHIT_PARTIAL_KEYS", decanonicalHitKeys);
}
cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.PARTIAL);
if (log.isInfoEnabled() && shouldLog())
log.info("ASYNC_BULK_HIT_PARTIAL for APP " + _appName + ", keys in cache [" + decanonicalR + "], all keys [" + keys + "]");
}
}
private <T> Map<String, T> handleFullCacheMiss(Map<EVCacheKey, T> retMap,
EVCacheEvent event,
Collection<String> keys,
StringBuilder cacheOperation) {
if (log.isInfoEnabled() && shouldLog())
log.info("ASYNC BULK : APP " + _appName + " ; Full cache miss for keys : " + keys);
if (event != null) event.setAttribute("status", "ASYNC_BMISS_ALL");
final Map<String, T> returnMap = new HashMap<>();
if (retMap != null && retMap.isEmpty()) {
for (String k : keys) {
returnMap.put(k, null);
}
}
cacheOperation.replace(0, cacheOperation.length(), EVCacheMetricsFactory.NO);
if (event != null) endEvent(event);
return returnMap;
}
private <T> CompletableFuture<Map<EVCacheKey, T>> handleFullRetry(EVCacheClient client,
EVCacheEvent event,
List<EVCacheKey> evcKeys,
Transcoder<T> tc,
RetryCount retryCount) {
final List<EVCacheClient> fbClients = _pool.getEVCacheClientsForReadExcluding(client.getServerGroup());
if (log.isInfoEnabled() && shouldLog()) {
log.info("Fetching the clients for retry {}", fbClients);
}
return handleFullBulkRetries(fbClients, 0, event, evcKeys, tc, retryCount);
}
private <T> CompletableFuture<Map<EVCacheKey, T>> handleFullBulkRetries(List<EVCacheClient> fbClients,
int fbClientIndex,
EVCacheEvent event,
List<EVCacheKey> evcKeys,
Transcoder<T> tc,
RetryCount retryCount) {
        if (fbClientIndex >= fbClients.size()) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Clients exhausted, so returning a future with a null result for keys {}", evcKeys);
            }
            return CompletableFuture.completedFuture(null);
        }
        if (log.isDebugEnabled() && shouldLog()) {
            EVCacheClient evCacheClient = fbClients.get(fbClientIndex);
            log.debug("Trying to fetch the data from server group {} client {} for keys {}", evCacheClient.getServerGroupName(), evCacheClient.getId(), evcKeys);
        }
CompletableFuture<Map<EVCacheKey, T>> future = getAsyncBulkData(fbClients.get(fbClientIndex), event, evcKeys, tc);
int nextIndex = fbClientIndex + 1;
retryCount.incr();
return future
.thenApply(s -> s != null ?
CompletableFuture.completedFuture(s) :
handleFullBulkRetries(fbClients, nextIndex, event, evcKeys, tc, retryCount))
.exceptionally(t -> handleFullBulkRetries(fbClients, nextIndex, event, evcKeys, tc, retryCount))
.thenCompose(Function.identity());
}
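    // Control-flow sketch of the recursion above (hypothetical fallback groups): with fallbacks
    // [fb0, fb1], a null or failed result from fb0 re-enters handleFullBulkRetries with
    // fbClientIndex = 1; once fbClientIndex reaches fbClients.size() the chain resolves to a
    // completed future holding null, and retryCount records how many fallback reads were attempted.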
private <T> CompletableFuture<Map<EVCacheKey, T>> handleBulkRetry(Map<EVCacheKey, T> retMap,
List<EVCacheKey> evcKeys,
Transcoder<T> tc,
EVCacheClient client,
EVCacheEvent event,
boolean hasZF,
RetryCount retryCount) {
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("Handling bulk retry for keys {}", evcKeys);
        }
        if (hasZF && (retMap == null || retMap.isEmpty())) {
            if (log.isDebugEnabled() && shouldLog()) {
                log.debug("Return map is null or empty, so going for a full retry for keys {}", evcKeys);
            }
            return handleFullRetry(client, event, evcKeys, tc, retryCount);
        }
        if (log.isDebugEnabled() && shouldLog()) {
            log.debug("Async does not yet support partial retry for bulk, so completing the future for keys {}", evcKeys);
        }
return CompletableFuture.completedFuture(retMap);
}
public <T> Map<String, T> getBulk(Collection<String> keys) throws EVCacheException {
return (this.getBulk(keys, (Transcoder<T>) _transcoder));
}
public <T> Map<String, T> getBulk(String... keys) throws EVCacheException {
return (this.getBulk(Arrays.asList(keys), (Transcoder<T>) _transcoder));
}
public <T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) throws EVCacheException {
return (this.getBulk(Arrays.asList(keys), tc));
}
@Override
public <T> EVCacheFuture[] set(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
final EVCacheLatch latch = this.set(key, value, tc, timeToLive, null);
if (latch == null) return new EVCacheFuture[0];
final List<Future<Boolean>> futures = latch.getAllFutures();
if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
for (int i = 0; i < futures.size(); i++) {
final Future<Boolean> future = futures.get(i);
if (future instanceof EVCacheFuture) {
eFutures[i] = (EVCacheFuture) future;
} else if (future instanceof EVCacheOperationFuture) {
eFutures[i] = new EVCacheFuture(futures.get(i), key, _appName, ((EVCacheOperationFuture<T>) futures.get(i)).getServerGroup());
} else {
eFutures[i] = new EVCacheFuture(future, key, _appName, null);
}
}
return eFutures;
}
public <T> EVCacheLatch set(String key, T value, Policy policy) throws EVCacheException {
return set(key, value, (Transcoder<T>)_transcoder, _timeToLive, policy);
}
public <T> EVCacheLatch set(String key, T value, int timeToLive, Policy policy) throws EVCacheException {
return set(key, value, (Transcoder<T>)_transcoder, timeToLive, policy);
}
public <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException {
return set(key, value, tc, _timeToLive, policy);
}
public <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
return this.set(key, value, tc, timeToLive, policy, clients, clients.length - _pool.getWriteOnlyEVCacheClients().length);
}
protected <T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount) throws EVCacheException {
if ((null == key) || (null == value)) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.SET);
final boolean throwExc = doThrowException();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.SET);
if (throwExc) throw new EVCacheException("Could not find a client to set the data");
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.SET);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.SET);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheLatchImpl(policy, 0, _appName);
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.SET);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, latchCount, _appName);
try {
CachedData cd = null;
CachedData cdHashed = null;
for (EVCacheClient client : clients) {
final String canonicalKey = evcKey.getCanonicalKey(client.isDuetClient());
final String hashKey = evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder());
if(cd == null) {
if (tc != null) {
cd = tc.encode(value);
} else if (_transcoder != null) {
cd = ((Transcoder<Object>) _transcoder).encode(value);
} else {
cd = client.getTranscoder().encode(value);
}
}
if (hashKey != null) {
if(cdHashed == null) {
final EVCacheValue val = new EVCacheValue(canonicalKey, cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
cdHashed = evcacheValueTranscoder.encode(val);
}
final Future<Boolean> future = client.set(hashKey, cdHashed, timeToLive, latch);
if (log.isDebugEnabled() && shouldLog()) log.debug("SET : APP " + _appName + ", Future " + future + " for hashed key : " + evcKey);
} else {
final Future<Boolean> future = client.set(canonicalKey, cd, timeToLive, latch);
if (log.isDebugEnabled() && shouldLog()) log.debug("SET : APP " + _appName + ", Future " + future + " for key : " + evcKey);
}
}
if (event != null) {
event.setTTL(timeToLive);
event.setCachedData(cd);
if(_eventsUsingLatchFP.get()) {
latch.setEVCacheEvent(event);
latch.scheduledFutureValidation();
} else {
endEvent(event);
}
}
return latch;
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
if (event != null) endEvent(event);
status = EVCacheMetricsFactory.ERROR;
if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTTLDistributionSummary(Call.SET.name(), EVCacheMetricsFactory.WRITE, EVCacheMetricsFactory.TTL).record(timeToLive);
getTimer(Call.SET.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("SET : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
}
}
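    // Illustrative usage (hypothetical key, value and timeout; a minimal sketch):
    //
    //   EVCacheLatch latch = evCache.set("mykey", payload, 900, Policy.QUORUM);
    //   boolean completed = latch.await(500, TimeUnit.MILLISECONDS);
    //
    // With 3 server groups and Policy.QUORUM, await(...) returns true once 2 of the 3 per-copy
    // set operations have completed, or false if the timeout elapses first.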
public <T> EVCacheFuture[] append(String key, T value, int timeToLive) throws EVCacheException {
return this.append(key, value, null, timeToLive);
}
public <T> EVCacheFuture[] append(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
if ((null == key) || (null == value)) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.APPEND);
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.APPEND);
if (throwExc) throw new EVCacheException("Could not find a client to set the data");
return new EVCacheFuture[0]; // Fast failure
}
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.APPEND);
final EVCacheKey evcKey = getEVCacheKey(key);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheFuture[0];
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
try {
final EVCacheFuture[] futures = new EVCacheFuture[clients.length];
CachedData cd = null;
int index = 0;
for (EVCacheClient client : clients) {
// ensure key hashing is not enabled
if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
throw new IllegalArgumentException("append is not supported when key hashing is enabled.");
}
if (cd == null) {
if (tc != null) {
cd = tc.encode(value);
} else if ( _transcoder != null) {
cd = ((Transcoder<Object>)_transcoder).encode(value);
} else {
cd = client.getTranscoder().encode(value);
}
//if (cd != null) EVCacheMetricsFactory.getInstance().getDistributionSummary(_appName + "-AppendData-Size", tags).record(cd.getData().length);
}
final Future<Boolean> future = client.append(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd);
futures[index++] = new EVCacheFuture(future, key, _appName, client.getServerGroup());
}
if (event != null) {
event.setCachedData(cd);
event.setTTL(timeToLive);
endEvent(event);
}
touchData(evcKey, timeToLive, clients);
return futures;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return new EVCacheFuture[0];
throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
//timer.record(duration, TimeUnit.MILLISECONDS);
getTimer(Call.APPEND.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
}
}
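    // Illustrative usage (hypothetical key and payload; a minimal sketch): append works only on
    // unhashed keys, presumably because a server-side append concatenates raw bytes onto the stored
    // value, which would break the EVCacheValue envelope used for hashed keys.
    //
    //   EVCacheFuture[] futures = evCache.append("mylist", ",newItem", 900);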
public <T> EVCacheFuture[] set(String key, T value, Transcoder<T> tc) throws EVCacheException {
return this.set(key, value, tc, _timeToLive);
}
public <T> EVCacheFuture[] set(String key, T value, int timeToLive) throws EVCacheException {
return this.set(key, value, (Transcoder<T>) _transcoder, timeToLive);
}
public <T> EVCacheFuture[] set(String key, T value) throws EVCacheException {
return this.set(key, value, (Transcoder<T>) _transcoder, _timeToLive);
}
public EVCacheFuture[] delete(String key) throws EVCacheException {
return this.deleteInternal(key, false);
}
protected EVCacheFuture[] deleteInternal(String key, boolean isOriginalKeyHashed) throws EVCacheException {
final EVCacheLatch latch = this.deleteInternal(key, null, isOriginalKeyHashed);
if (latch == null) return new EVCacheFuture[0];
final List<Future<Boolean>> futures = latch.getAllFutures();
if (futures == null || futures.isEmpty()) return new EVCacheFuture[0];
final EVCacheFuture[] eFutures = new EVCacheFuture[futures.size()];
for (int i = 0; i < futures.size(); i++) {
final Future<Boolean> future = futures.get(i);
if (future instanceof EVCacheFuture) {
eFutures[i] = (EVCacheFuture) future;
} else if (future instanceof EVCacheOperationFuture) {
final EVCacheOperationFuture<Boolean> evfuture = (EVCacheOperationFuture<Boolean>)future;
eFutures[i] = new EVCacheFuture(future, key, _appName, evfuture.getServerGroup(), evfuture.getEVCacheClient());
} else {
eFutures[i] = new EVCacheFuture(future, key, _appName, null);
}
}
return eFutures;
}
@Override
public <T> EVCacheLatch delete(String key, Policy policy) throws EVCacheException {
return this.deleteInternal(key, policy, false);
}
protected <T> EVCacheLatch deleteInternal(String key, Policy policy, boolean isOriginalKeyHashed) throws EVCacheException {
if (key == null) throw new IllegalArgumentException("Key cannot be null");
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.DELETE);
if (throwExc) throw new EVCacheException("Could not find a client to delete the keyAPP " + _appName
+ ", Key " + key);
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.DELETE);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DELETE);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DELETE);
return null;
}
startEvent(event);
}
String status = EVCacheMetricsFactory.SUCCESS;
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
try {
for (int i = 0; i < clients.length; i++) {
Future<Boolean> future = clients[i].delete(isOriginalKeyHashed ? evcKey.getKey() : evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), latch);
if (log.isDebugEnabled() && shouldLog()) log.debug("DELETE : APP " + _appName + ", Future " + future + " for key : " + evcKey);
}
if (event != null) {
if(_eventsUsingLatchFP.get()) {
latch.setEVCacheEvent(event);
latch.scheduledFutureValidation();
} else {
endEvent(event);
}
}
return latch;
} catch (Exception ex) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while deleting the data for APP " + _appName + ", key : " + key, ex);
status = EVCacheMetricsFactory.ERROR;
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
throw new EVCacheException("Exception while deleting the data for APP " + _appName + ", key : " + key, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.DELETE.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
//timer.record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("DELETE : APP " + _appName + " Took " + duration + " milliSec for key : " + key);
}
}
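    // Illustrative usage (hypothetical key; a minimal sketch): delete issues one operation per
    // write client and returns a latch over them, so callers needing confirmation can block:
    //
    //   EVCacheLatch latch = evCache.delete("mykey", Policy.ALL_MINUS_1);
    //   latch.await(500, TimeUnit.MILLISECONDS);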
public int getDefaultTTL() {
return _timeToLive;
}
public long incr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException {
if ((null == key) || by < 0 || defaultVal < 0 || timeToLive < 0) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.INCR);
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.INCR);
if (log.isDebugEnabled() && shouldLog()) log.debug("INCR : " + _metricPrefix + ":NULL_CLIENT");
if (throwExc) throw new EVCacheException("Could not find a client to incr the data");
return -1;
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.INCR);
if (event != null) {
event.setTTL(timeToLive);
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.INCR);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return -1;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.INCR);
return -1;
}
startEvent(event);
}
String status = EVCacheMetricsFactory.SUCCESS;
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
long currentValue = -1;
try {
final long[] vals = new long[clients.length];
int index = 0;
for (EVCacheClient client : clients) {
vals[index] = client.incr(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), by, defaultVal, timeToLive);
if (vals[index] != -1 && currentValue < vals[index]) {
currentValue = vals[index];
if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " current value = " + currentValue + " for key : " + key + " from client : " + client);
}
index++;
}
if (currentValue != -1) {
CachedData cd = null;
if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " current value = " + currentValue + " for key : " + key);
for (int i = 0; i < vals.length; i++) {
if (vals[i] == -1 && currentValue > -1) {
if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + "; Zone " + clients[i].getZone()
+ " had a value = -1 so setting it to current value = " + currentValue + " for key : " + key);
clients[i].incr(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), 0, currentValue, timeToLive);
} else if (vals[i] != currentValue) {
if(cd == null) cd = clients[i].getTranscoder().encode(String.valueOf(currentValue));
if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + "; Zone " + clients[i].getZone()
+ " had a value of " + vals[i] + " so setting it to current value = " + currentValue + " for key : " + key);
clients[i].set(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), cd, timeToLive);
}
}
}
if (event != null) endEvent(event);
if (log.isDebugEnabled()) log.debug("INCR : APP " + _appName + " returning value = " + currentValue + " for key : " + key);
return currentValue;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception incrementing the value for APP " + _appName + ", key : " + key, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return -1;
throw new EVCacheException("Exception incrementing value for APP " + _appName + ", key : " + key, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.INCR.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("INCR : APP " + _appName + ", Took " + duration
+ " milliSec for key : " + key + " with value as " + currentValue);
}
}
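    // Worked example of the reconciliation above (hypothetical values): with 3 copies returning
    // {5, -1, 4} for the same counter, currentValue becomes 5; the copy that returned -1 is
    // re-seeded via incr(key, 0, 5, ttl) and the copy at 4 is overwritten via set(key, "5", ttl),
    // so all copies converge on the maximum observed value.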
public long decr(String key, long by, long defaultVal, int timeToLive) throws EVCacheException {
if ((null == key) || by < 0 || defaultVal < 0 || timeToLive < 0) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.DECR);
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.DECR);
if (log.isDebugEnabled() && shouldLog()) log.debug("DECR : " + _metricPrefix + ":NULL_CLIENT");
if (throwExc) throw new EVCacheException("Could not find a client to decr the data");
return -1;
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.DECR);
if (event != null) {
event.setTTL(timeToLive);
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DECR);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return -1;
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.DECR);
return -1;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
long currentValue = -1;
try {
final long[] vals = new long[clients.length];
int index = 0;
for (EVCacheClient client : clients) {
vals[index] = client.decr(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), by, defaultVal, timeToLive);
if (vals[index] != -1 && currentValue < vals[index]) {
currentValue = vals[index];
if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " current value = " + currentValue + " for key : " + key + " from client : " + client);
}
index++;
}
if (currentValue != -1) {
CachedData cd = null;
if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " current value = " + currentValue
+ " for key : " + key);
for (int i = 0; i < vals.length; i++) {
if (vals[i] == -1 && currentValue > -1) {
if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + "; Zone " + clients[i].getZone()
+ " had a value = -1 so setting it to current value = "
+ currentValue + " for key : " + key);
clients[i].decr(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), 0, currentValue, timeToLive);
} else if (vals[i] != currentValue) {
                        // encode as a String so the stored representation matches what incr writes (memcached counters are ASCII numbers)
                        if(cd == null) cd = clients[i].getTranscoder().encode(String.valueOf(currentValue));
if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + "; Zone " + clients[i].getZone()
+ " had a value of " + vals[i]
+ " so setting it to current value = " + currentValue + " for key : " + key);
clients[i].set(evcKey.getDerivedKey(clients[i].isDuetClient(), clients[i].getHashingAlgorithm(), clients[i].shouldEncodeHashKey(), clients[i].getMaxDigestBytes(), clients[i].getMaxHashLength(), clients[i].getBaseEncoder()), cd, timeToLive);
}
}
}
if (event != null) endEvent(event);
if (log.isDebugEnabled()) log.debug("DECR : APP " + _appName + " returning value = " + currentValue + " for key : " + key);
return currentValue;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception decrementing the value for APP " + _appName + ", key : " + key, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return -1;
throw new EVCacheException("Exception decrementing value for APP " + _appName + ", key : " + key, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.DECR.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("DECR : APP " + _appName + ", Took " + duration + " milliSec for key : " + key + " with value as " + currentValue);
}
}
@Override
public <T> EVCacheLatch replace(String key, T value, Policy policy) throws EVCacheException {
return replace(key, value, (Transcoder<T>) _transcoder, policy);
}
@Override
public <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, Policy policy) throws EVCacheException {
        return replace(key, value, tc, _timeToLive, policy);
}
public <T> EVCacheLatch replace(String key, T value, int timeToLive, Policy policy) throws EVCacheException {
return replace(key, value, (Transcoder<T>)_transcoder, timeToLive, policy);
}
@Override
public <T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy)
throws EVCacheException {
if ((null == key) || (null == value)) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.REPLACE);
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.REPLACE);
if (throwExc) throw new EVCacheException("Could not find a client to set the data");
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.REPLACE);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.REPLACE);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheLatchImpl(policy, 0, _appName);
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.REPLACE);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
try {
final EVCacheFuture[] futures = new EVCacheFuture[clients.length];
CachedData cd = null;
int index = 0;
for (EVCacheClient client : clients) {
if (tc != null) {
cd = tc.encode(value);
} else if (_transcoder != null) {
cd = ((Transcoder<Object>) _transcoder).encode(value);
} else {
cd = client.getTranscoder().encode(value);
}
if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
final EVCacheValue val = new EVCacheValue(evcKey.getCanonicalKey(client.isDuetClient()), cd.getData(), cd.getFlags(), timeToLive, System.currentTimeMillis());
cd = evcacheValueTranscoder.encode(val);
}
final Future<Boolean> future = client.replace(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd, timeToLive, latch);
futures[index++] = new EVCacheFuture(future, key, _appName, client.getServerGroup());
}
if (event != null) {
event.setTTL(timeToLive);
event.setCachedData(cd);
if(_eventsUsingLatchFP.get()) {
latch.setEVCacheEvent(event);
latch.scheduledFutureValidation();
} else {
endEvent(event);
}
}
return latch;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception setting the data for APP " + _appName + ", key : " + evcKey, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
throw new EVCacheException("Exception setting data for APP " + _appName + ", key : " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.REPLACE.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("REPLACE : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
}
}
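    // Illustrative usage (hypothetical key; a minimal sketch): in line with memcached semantics,
    // replace only succeeds on copies where the key already exists, unlike set which always stores:
    //
    //   EVCacheLatch latch = evCache.replace("mykey", newValue, 900, Policy.ONE);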
@Override
public String getCachePrefix() {
return _cacheName;
}
public String getAppName() {
return _appName;
}
public String getCacheName() {
return _cacheName;
}
public <T> EVCacheLatch appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
if ((null == key) || (null == value)) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.APPEND_OR_ADD);
final boolean throwExc = doThrowException();
final EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.APPEND_OR_ADD);
if (throwExc) throw new EVCacheException("Could not find a client to appendOrAdd the data");
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.APPEND_OR_ADD);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND_OR_ADD);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.APPEND_OR_ADD);
return null;
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
final EVCacheLatchImpl latch = new EVCacheLatchImpl(policy == null ? Policy.ALL_MINUS_1 : policy, clients.length - _pool.getWriteOnlyEVCacheClients().length, _appName);
String status = EVCacheMetricsFactory.SUCCESS;
try {
CachedData cd = null;
for (EVCacheClient client : clients) {
// ensure key hashing is not enabled
if (evcKey.getHashKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()) != null) {
throw new IllegalArgumentException("appendOrAdd is not supported when key hashing is enabled.");
}
if (cd == null) {
if (tc != null) {
cd = tc.encode(value);
} else if ( _transcoder != null) {
cd = ((Transcoder<Object>)_transcoder).encode(value);
} else {
cd = client.getTranscoder().encode(value);
}
}
final Future<Boolean> future = client.appendOrAdd(evcKey.getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()), cd, timeToLive, latch);
if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND_OR_ADD : APP " + _appName + ", Future " + future + " for key : " + evcKey);
}
if (event != null) {
event.setTTL(timeToLive);
event.setCachedData(cd);
if(_eventsUsingLatchFP.get()) {
latch.setEVCacheEvent(event);
latch.scheduledFutureValidation();
} else {
endEvent(event);
}
}
return latch;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception while appendOrAdd the data for APP " + _appName + ", key : " + evcKey, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
throw new EVCacheException("Exception while appendOrAdd data for APP " + _appName + ", key : " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.APPEND_OR_ADD.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("APPEND_OR_ADD : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
}
}
public <T> Future<Boolean>[] appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
final EVCacheLatch latch = this.appendOrAdd(key, value, tc, timeToLive, Policy.ALL_MINUS_1);
if(latch != null) return latch.getAllFutures().toArray(new Future[latch.getAllFutures().size()]);
return new EVCacheFuture[0];
}
public <T> boolean add(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException {
final EVCacheLatch latch = add(key, value, tc, timeToLive, Policy.NONE);
try {
latch.await(_pool.getOperationTimeout().get(), TimeUnit.MILLISECONDS);
final List<Future<Boolean>> allFutures = latch.getAllFutures();
for(Future<Boolean> future : allFutures) {
if(!future.get()) return false;
}
return true;
} catch (InterruptedException e) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + key, e);
final boolean throwExc = doThrowException();
if(throwExc) throw new EVCacheException("Exception add data for APP " + _appName + ", key : " + key, e);
return false;
} catch (ExecutionException e) {
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + key, e);
final boolean throwExc = doThrowException();
if(throwExc) throw new EVCacheException("Exception add data for APP " + _appName + ", key : " + key, e);
return false;
}
}
@Override
public <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException {
EVCacheClient[] clients = _pool.getEVCacheClientForWrite();
EVCacheClient[] writeOnlyClients = _pool.getWriteOnlyEVCacheClients();
        // In the case of adds, we skip the remaining adds to the pool if the value is already present on the 1st client.
        // Sorting makes sure the 1st element of the list is a read/write client and not just a write-only client.
EVCacheClient[] sortedClients = sortClients(clients, writeOnlyClients);
return this.add(key, value, tc, timeToLive, policy, sortedClients, clients.length - _pool.getWriteOnlyEVCacheClients().length);
}
    public EVCacheClient[] sortClients(EVCacheClient[] clients, EVCacheClient[] writeOnlyClients) {
        final List<EVCacheClient> writeOnlyClientsList = Arrays.asList(writeOnlyClients);
        // Read/write clients sort ahead of write-only clients. Boolean.compare keeps the
        // comparator transitive, avoiding TimSort's "Comparison method violates its general
        // contract!" failure that the previous always-1/-1 comparator could trigger.
        return Arrays.stream(clients)
                .sorted((s1, s2) -> Boolean.compare(writeOnlyClientsList.contains(s1), writeOnlyClientsList.contains(s2)))
                .toArray(EVCacheClient[]::new);
    }
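    // Worked example (hypothetical server groups): given clients [rwA, woB, rwC] where woB is
    // write-only, sortClients(...) yields [rwA, rwC, woB], so the "value already present" check
    // performed during add(...) runs against a read/write copy first.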
protected <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount) throws EVCacheException {
return add(key, value, tc, timeToLive, policy, clients, latchCount, true);
}
protected <T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy, EVCacheClient[] clients, int latchCount, boolean fixup) throws EVCacheException {
if ((null == key) || (null == value)) throw new IllegalArgumentException();
checkTTL(timeToLive, Call.ADD);
final boolean throwExc = doThrowException();
if (clients.length == 0) {
incrementFastFail(EVCacheMetricsFactory.NULL_CLIENT, Call.ADD);
if (throwExc) throw new EVCacheException("Could not find a client to Add the data");
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
final EVCacheKey evcKey = getEVCacheKey(key);
final EVCacheEvent event = createEVCacheEvent(Arrays.asList(clients), Call.ADD);
if (event != null) {
event.setEVCacheKeys(Arrays.asList(evcKey));
try {
if (shouldThrottle(event)) {
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ADD);
if (throwExc) throw new EVCacheException("Request Throttled for app " + _appName + " & key " + key);
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
} catch(EVCacheException ex) {
if(throwExc) throw ex;
incrementFastFail(EVCacheMetricsFactory.THROTTLED, Call.ADD);
return new EVCacheLatchImpl(policy, 0, _appName); // Fast failure
}
startEvent(event);
}
final long start = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime();
String status = EVCacheMetricsFactory.SUCCESS;
EVCacheLatch latch = null;
try {
CachedData cd = null;
if (tc != null) {
cd = tc.encode(value);
} else if (_transcoder != null) {
cd = ((Transcoder<Object>) _transcoder).encode(value);
} else {
cd = _pool.getEVCacheClientForRead().getTranscoder().encode(value);
}
if (clientUtil == null) clientUtil = new EVCacheClientUtil(_appName, _pool.getOperationTimeout().get());
latch = clientUtil.add(evcKey, cd, evcacheValueTranscoder, timeToLive, policy, clients, latchCount, fixup);
if (event != null) {
event.setTTL(timeToLive);
event.setCachedData(cd);
if (_eventsUsingLatchFP.get()) {
latch.setEVCacheEvent(event);
if (latch instanceof EVCacheLatchImpl)
((EVCacheLatchImpl) latch).scheduledFutureValidation();
} else {
endEvent(event);
}
}
return latch;
} catch (Exception ex) {
status = EVCacheMetricsFactory.ERROR;
if (log.isDebugEnabled() && shouldLog()) log.debug("Exception adding the data for APP " + _appName + ", key : " + evcKey, ex);
if (event != null) {
event.setStatus(status);
eventError(event, ex);
}
if (!throwExc) return new EVCacheLatchImpl(policy, 0, _appName);
throw new EVCacheException("Exception adding data for APP " + _appName + ", key : " + evcKey, ex);
} finally {
final long duration = EVCacheMetricsFactory.getInstance().getRegistry().clock().wallTime()- start;
getTimer(Call.ADD.name(), EVCacheMetricsFactory.WRITE, null, status, 1, maxWriteDuration.get().intValue(), null).record(duration, TimeUnit.MILLISECONDS);
if (log.isDebugEnabled() && shouldLog()) log.debug("ADD : APP " + _appName + ", Took " + duration + " milliSec for key : " + evcKey);
}
}
private DistributionSummary getTTLDistributionSummary(String operation, String type, String metric) {
DistributionSummary distributionSummary = distributionSummaryMap.get(operation);
if(distributionSummary != null) return distributionSummary;
final List<Tag> tagList = new ArrayList<Tag>(6);
tagList.addAll(tags);
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, type));
distributionSummary = EVCacheMetricsFactory.getInstance().getDistributionSummary(metric, tagList);
distributionSummaryMap.put(operation, distributionSummary);
return distributionSummary;
}
private Timer getTimer(String operation, String operationType, String hit, String status, int tries, long duration, ServerGroup serverGroup) {
String name = ((hit != null) ? operation + hit : operation);
if(status != null) name += status;
if(tries >= 0) name += tries;
if(serverGroup != null) name += serverGroup.getName();
//if(_cacheName != null) name += _cacheName;
Timer timer = timerMap.get(name);
if(timer != null) return timer;
final List<Tag> tagList = new ArrayList<Tag>(7);
tagList.addAll(tags);
if(operation != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, operation));
if(operationType != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TYPE_TAG, operationType));
if(status != null) tagList.add(new BasicTag(EVCacheMetricsFactory.IPC_RESULT, status));
if(hit != null) tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE_HIT, hit));
switch(tries) {
case 0 :
case 1 :
tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.INITIAL));
break;
case 2 :
tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.SECOND));
break;
default:
tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, EVCacheMetricsFactory.THIRD_UP));
break;
}
// if(tries == 0) tagList.add(new BasicTag(EVCacheMetricsFactory.ATTEMPT, String.valueOf(tries)));
if(serverGroup != null) {
tagList.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName()));
tagList.add(new BasicTag(EVCacheMetricsFactory.ZONE, serverGroup.getZone()));
}
timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.OVERALL_CALL, tagList, Duration.ofMillis(duration));
timerMap.put(name, timer);
return timer;
}
protected List<Tag> getTags() {
return tags;
}
}
| 750 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheKey.java
|
package com.netflix.evcache;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.util.KeyHasher;
import com.netflix.evcache.util.KeyHasher.HashingAlgorithm;
public class EVCacheKey {
private static final Logger log = LoggerFactory.getLogger(EVCacheKey.class);
private final String appName;
private final HashingAlgorithm hashingAlgorithmAtAppLevel;
private final Property<Boolean> shouldEncodeHashKeyAtAppLevel;
private final Property<Integer> maxDigestBytesAtAppLevel;
private final Property<Integer> maxHashLengthAtAppLevel;
private final String key;
private final String canonicalKey;
private String canonicalKeyForDuet;
    // Note that we cache hashed keys based on the hashing algorithm alone, not on other hashing
    // properties like max.hash.length. So changing max.hash.length alone will not necessarily
    // trigger hash recalculation; one has to change the hashing algorithm for the other hashing
    // properties to be taken into account. This is to make such a hashing property change very
    // obvious rather than subtle.
private final Map<String, String> hashedKeysByAlgorithm;
private final Map<String, String> hashedKeysByAlgorithmForDuet;
private final String encoder;
public EVCacheKey(String appName, String key, String canonicalKey, HashingAlgorithm hashingAlgorithmAtAppLevel, Property<Boolean> shouldEncodeHashKeyAtAppLevel, Property<Integer> maxDigestBytesAtAppLevel, Property<Integer> maxHashLengthAtAppLevel) {
this(appName, key, canonicalKey, hashingAlgorithmAtAppLevel, shouldEncodeHashKeyAtAppLevel, maxDigestBytesAtAppLevel, maxHashLengthAtAppLevel, null);
}
public EVCacheKey(String appName, String key, String canonicalKey, HashingAlgorithm hashingAlgorithmAtAppLevel, Property<Boolean> shouldEncodeHashKeyAtAppLevel, Property<Integer> maxDigestBytesAtAppLevel, Property<Integer> maxHashLengthAtAppLevel, String encoder) {
super();
this.appName = appName;
this.key = key;
this.canonicalKey = canonicalKey;
this.hashingAlgorithmAtAppLevel = hashingAlgorithmAtAppLevel;
this.shouldEncodeHashKeyAtAppLevel = shouldEncodeHashKeyAtAppLevel;
this.maxDigestBytesAtAppLevel = maxDigestBytesAtAppLevel;
this.maxHashLengthAtAppLevel = maxHashLengthAtAppLevel;
this.encoder = encoder;
hashedKeysByAlgorithm = new HashMap<>();
hashedKeysByAlgorithmForDuet = new HashMap<>();
}
public String getKey() {
return key;
}
@Deprecated
public String getCanonicalKey() {
return canonicalKey;
}
public String getCanonicalKey(boolean isDuet) {
return isDuet ? getCanonicalKeyForDuet() : canonicalKey;
}
private String getCanonicalKeyForDuet() {
if (null == canonicalKeyForDuet) {
final int duetKeyLength = appName.length() + 1 + canonicalKey.length();
canonicalKeyForDuet = new StringBuilder(duetKeyLength).append(appName).append(':').append(canonicalKey).toString();
if (log.isDebugEnabled()) log.debug("canonicalKeyForDuet : " + canonicalKeyForDuet);
}
return canonicalKeyForDuet;
}
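    // Worked example (hypothetical names): for appName "EVCACHE_TEST" and canonicalKey
    // "prefix:mykey", the duet form is "EVCACHE_TEST:prefix:mykey"; it is built lazily and
    // memoized in canonicalKeyForDuet on first use.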
@Deprecated
public String getHashKey() {
return getHashKey(hashingAlgorithmAtAppLevel, null == shouldEncodeHashKeyAtAppLevel ? null : shouldEncodeHashKeyAtAppLevel.get(), null == maxDigestBytesAtAppLevel ? null : maxDigestBytesAtAppLevel.get(), null == maxHashLengthAtAppLevel ? null : maxHashLengthAtAppLevel.get(), encoder);
}
// overlays app level hashing and client level hashing
    public String getHashKey(boolean isDuet, HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String baseEncoder) {
if (hashingAlgorithm == HashingAlgorithm.NO_HASHING) {
return null;
}
if (null == hashingAlgorithm) {
hashingAlgorithm = hashingAlgorithmAtAppLevel;
}
if (null == shouldEncodeHashKey) {
shouldEncodeHashKey = this.shouldEncodeHashKeyAtAppLevel.get();
}
if (null == maxDigestBytes) {
maxDigestBytes = this.maxDigestBytesAtAppLevel.get();
}
if (null == maxHashLength) {
maxHashLength = this.maxHashLengthAtAppLevel.get();
}
        if (null == baseEncoder) {
            baseEncoder = encoder;
        }
        final String rKey = isDuet ? getHashKeyForDuet(hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEncoder) : getHashKey(hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEncoder);
if (log.isDebugEnabled()) log.debug("Key : " + rKey);
return rKey;
}
// overlays app level hashing algorithm and client level hashing algorithm
    public String getDerivedKey(boolean isDuet, HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String baseEncoder) {
// this overlay of hashingAlgorithm helps determine if there at all needs to be hashing performed, otherwise, will return canonical key
if (null == hashingAlgorithm) {
hashingAlgorithm = hashingAlgorithmAtAppLevel;
}
        final String derivedKey = null == hashingAlgorithm || hashingAlgorithm == HashingAlgorithm.NO_HASHING ? getCanonicalKey(isDuet) : getHashKey(isDuet, hashingAlgorithm, shouldEncodeHashKey, maxDigestBytes, maxHashLength, baseEncoder);
if (log.isDebugEnabled()) log.debug("derivedKey : " + derivedKey);
return derivedKey;
}
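    // Resolution sketch (mirrors the overlay logic above; hypothetical arguments): client-level
    // settings win over app-level ones.
    //   getDerivedKey(false, null, ...)                        -> app-level algorithm decides
    //   getDerivedKey(false, HashingAlgorithm.NO_HASHING, ...) -> canonical key, never hashed
    //   getDerivedKey(true, someAlgorithm, ...)                -> hashed key over the duet canonical key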
private String getHashKey(HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String encoder) {
if (null == hashingAlgorithm) {
return null;
}
        final String key = hashingAlgorithm.toString()
                + (maxDigestBytes != null ? maxDigestBytes.toString() : "-")
                + (maxHashLength != null ? maxHashLength.toString() : "-")
                + (encoder != null ? encoder : "-");
String val = hashedKeysByAlgorithm.get(key);
if(val == null) {
val = KeyHasher.getHashedKeyEncoded(getCanonicalKey(false), hashingAlgorithm, maxDigestBytes, maxHashLength, encoder);
hashedKeysByAlgorithm.put(key , val);
}
if (log.isDebugEnabled()) log.debug("getHashKey : " + val);
// TODO: Once the issue around passing hashedKey in bytes[] is figured, we will start using (nullable) shouldEncodeHashKey, and call KeyHasher.getHashedKeyInBytes() accordingly
return val;
}
private String getHashKeyForDuet(HashingAlgorithm hashingAlgorithm, Boolean shouldEncodeHashKey, Integer maxDigestBytes, Integer maxHashLength, String encoder) {
if (null == hashingAlgorithm) {
return null;
}
        final String key = hashingAlgorithm.toString()
                + (maxDigestBytes != null ? maxDigestBytes.toString() : "-")
                + (maxHashLength != null ? maxHashLength.toString() : "-")
                + (encoder != null ? encoder : "-");
String val = hashedKeysByAlgorithmForDuet.get(key);
if(val == null) {
val = KeyHasher.getHashedKeyEncoded(getCanonicalKeyForDuet(), hashingAlgorithm, maxDigestBytes, maxHashLength, encoder);
hashedKeysByAlgorithmForDuet.put(key , val);
}
if (log.isDebugEnabled()) log.debug("getHashKeyForDuet : " + val);
// TODO: Once the issue around passing hashedKey in bytes[] is figured, we will start using (nullable) shouldEncodeHashKey, and call KeyHasher.getHashedKeyInBytes() accordingly
return val;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((canonicalKey == null) ? 0 : canonicalKey.hashCode());
result = prime * result + ((canonicalKeyForDuet == null) ? 0 : canonicalKeyForDuet.hashCode());
result = prime * result + ((key == null) ? 0 : key.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EVCacheKey other = (EVCacheKey) obj;
if (canonicalKey == null) {
if (other.canonicalKey != null)
return false;
} else if (!canonicalKey.equals(other.canonicalKey))
return false;
if (canonicalKeyForDuet == null) {
if (other.canonicalKeyForDuet != null)
return false;
} else if (!canonicalKeyForDuet.equals(other.canonicalKeyForDuet))
return false;
if (key == null) {
if (other.key != null)
return false;
} else if (!key.equals(other.key))
return false;
return true;
}
@Override
public String toString() {
return "EVCacheKey [key=" + key + ", canonicalKey=" + canonicalKey + ", canonicalKeyForDuet=" + canonicalKeyForDuet + (hashedKeysByAlgorithm.size() > 0 ? ", hashedKeysByAlgorithm=" + hashedKeysByAlgorithm.toString() : "") + (hashedKeysByAlgorithmForDuet.size() > 0 ? ", hashedKeysByAlgorithmForDuet=" + hashedKeysByAlgorithmForDuet.toString() + "]" : "]");
}
}
| 751 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheClientPoolConfigurationProperties.java
|
package com.netflix.evcache;
import java.time.Duration;
public class EVCacheClientPoolConfigurationProperties {
/**
* Prefix to be applied to keys.
*/
private String keyPrefix;
/**
* Time-to-live in seconds.
*/
private Duration timeToLive;
/**
* Whether or not retry is to be enabled.
*/
private Boolean retryEnabled = true;
/**
* Whether or not exception throwing is to be enabled.
*/
private Boolean exceptionThrowingEnabled = false;
public EVCacheClientPoolConfigurationProperties() {
this.keyPrefix = "";
this.timeToLive = Duration.ofSeconds(900);
this.retryEnabled = true;
this.exceptionThrowingEnabled = false;
}
public String getKeyPrefix() {
return keyPrefix;
}
public void setKeyPrefix(String keyPrefix) {
this.keyPrefix = keyPrefix;
}
public Duration getTimeToLive() {
return timeToLive;
}
public void setTimeToLive(Duration timeToLive) {
this.timeToLive = timeToLive;
}
public Boolean getRetryEnabled() {
return retryEnabled;
}
public void setRetryEnabled(Boolean retryEnabled) {
this.retryEnabled = retryEnabled;
}
public Boolean getExceptionThrowingEnabled() {
return exceptionThrowingEnabled;
}
public void setExceptionThrowingEnabled(Boolean exceptionThrowingEnabled) {
this.exceptionThrowingEnabled = exceptionThrowingEnabled;
}
}
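// Illustrative wiring (a sketch of the defaults the constructor above establishes: keyPrefix "",
// TTL 900s, retries on, exception throwing off):
//
//   EVCacheClientPoolConfigurationProperties props = new EVCacheClientPoolConfigurationProperties();
//   props.setTimeToLive(Duration.ofSeconds(300));
//   props.setExceptionThrowingEnabled(true);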
| 752 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheException.java
|
package com.netflix.evcache;
public class EVCacheException extends Exception {
private static final long serialVersionUID = -3885811159646046383L;
public EVCacheException(String message) {
super(message);
}
public EVCacheException(String message, Throwable cause) {
super(message, cause);
}
}
| 753 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheLatch.java
|
package com.netflix.evcache;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.netflix.evcache.event.EVCacheEvent;
import net.spy.memcached.internal.OperationCompletionListener;
/**
 * EVCacheLatch is a blocking mechanism that allows one or more threads to wait until
 * a set of operations, as specified by the {@link Policy}, performed by EVCache threads is complete.
 *
 * <p>The Latch is initialized with a <em>count</em> as determined by the Policy.
 * The {@link #await await} methods block until the current count reaches
 * zero due to completion of the operations, after which
 * all waiting threads are released and any subsequent invocations of
 * {@link #await await} return immediately.
 *
 * The latch is also released if the specified timeout is reached even though the count is greater than zero.
 * In this case the {@link #await await} method returns false.
 *
 * The various methods on the latch can be queried at any time; they return the state of the operations across the Futures.
 */
public interface EVCacheLatch extends OperationCompletionListener {
    /**
     * The Policy which can be used to control the latch behavior. The latch is released when the number of operations specified by the Policy have completed.
     * For example: if your evcache app has 3 copies (3 server groups) in a region, then each write done on that app performs 3 operations (one for each copy/server group).
     * If you are doing a set operation and the selected Policy is ALL_MINUS_1, then 2 of those operations (sets on 2 copies/server groups) need to finish before the latch is released.
     *
     * Note that an operation is considered complete when it was either accepted or rejected by evcache.
     * If it is still in flight, then that operation is in a pending state.
     *
     * Case ALL : All the operations have to be completed.
     * Case ALL_MINUS_1 : All but one need to be completed. For example, if there are 3 copies for a cache then 2 need to be completed.
     * Case QUORUM : A quorum of the operations has to be completed before the latch is released; for a cluster with 3 copies this means 2 operations.
     * Case ONE : At least one operation needs to be completed before the latch is released.
     * Case NONE : The latch is released immediately.
     *
     * @author smadappa
     *
     */
public static enum Policy {
NONE, ONE, QUORUM, ALL_MINUS_1, ALL
}
/**
* Causes the current thread to wait until the latch has counted down to
* zero, unless the thread is interrupted, or the specified waiting time
* elapses.
*
* @param timeout
* - the maximum time to wait
* @param unit
* - the time unit of the timeout argument
*
     * @return - {@code true} if the count reached zero and {@code false} if the
     *         waiting time elapsed before the count reached zero
* @throws InterruptedException
* if the current thread is interrupted while waiting
*/
boolean await(long timeout, TimeUnit unit) throws InterruptedException;
/**
     * Returns {@code true} if all the tasks assigned to this Latch have
     * completed.
*
* Completion may be due to normal termination, an exception, or
* cancellation -- in all of these cases, this method will return
* {@code true}.
*
* @return {@code true} if all the tasks completed
*/
boolean isDone();
/**
* Returns the Futures backing the Pending tasks.
*
* @return the current outstanding tasks
*/
List<Future<Boolean>> getPendingFutures();
/**
* Returns all the Tasks.
*
* @return the tasks submitted part of this Latch
*/
List<Future<Boolean>> getAllFutures();
/**
* Returns all the completed Tasks.
*
* @return the current completed tasks
*/
List<Future<Boolean>> getCompletedFutures();
/**
* Returns the number of Futures that are still Pending.
*
* @return the current outstanding Future task count
*/
int getPendingFutureCount();
/**
* Returns the number of Future Tasks that are completed.
*
* @return the current completed future task count
*/
int getCompletedFutureCount();
/**
* Returns the number of Tasks that are still Pending.
*
* @return the current outstanding task count
*/
int getPendingCount();
/**
     * Returns the number of Tasks that are completed. A task is completed if
     * it finished with either success or failure. The task is considered a
     * failure if it times out or there was an exception.
*
* @return the completed task count
*/
int getCompletedCount();
/**
* Returns the number of Tasks that failed to complete. There was either an
* exception or the task was cancelled.
*
* @return the failed task count
*/
int getFailureCount();
/**
* Returns the number of Tasks that need to be successfully completed based
* on the Specified Policy before the latch can be released.
*
* @return the expected success count
* @deprecated replaced by {@link #getExpectedCompleteCount()}
*/
int getExpectedSuccessCount();
/**
* Returns the number of Tasks that need to be successfully completed based
* on the Specified Policy before the latch can be released.
*
* @return the expected success count
*/
int getExpectedCompleteCount();
/**
     * Returns the current number of Tasks that are successful.
*
* @return the current Successful Task count.
*/
int getSuccessCount();
/**
     * The {@code Policy} for this Latch.
     *
     * @return the Policy.
*/
Policy getPolicy();
/**
     * Returns {@code true} if the operation is a fast failure, i.e. the operation was not even performed.
     *
     * @return {@code true} upon fast failure, else {@code false}.
*/
boolean isFastFailure();
/**
     * Sets the event associated with this Latch.
     *
     * @param event the EVCacheEvent to associate with this latch.
*/
void setEVCacheEvent(EVCacheEvent event);
}
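/*
 * A minimal usage sketch (not part of the original source): assuming an
 * EVCache instance named "cache" built elsewhere, a write can be issued with
 * a latch and then awaited until the QUORUM policy is satisfied.
 */
class EVCacheLatchUsageSketch {
    // Hypothetical helper illustrating how a caller might block on the latch.
    static boolean setWithQuorum(EVCache cache, String key, String value) throws Exception {
        final EVCacheLatch latch = cache.set(key, value, EVCacheLatch.Policy.QUORUM);
        // Wait up to 500 ms for quorum completion; returns false on timeout.
        return latch.await(500, TimeUnit.MILLISECONDS);
    }
}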
| 754 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheConnectException.java
|
package com.netflix.evcache;
import java.io.IOException;
public class EVCacheConnectException extends IOException {
private static final long serialVersionUID = 8065483548278456469L;
public EVCacheConnectException(String message) {
super(message);
}
public EVCacheConnectException(String message, Throwable cause) {
super(message, cause);
}
}
| 755 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheGetOperationListener.java
|
package com.netflix.evcache;
import com.netflix.evcache.operation.EVCacheOperationFuture;
import net.spy.memcached.internal.GenericCompletionListener;
public interface EVCacheGetOperationListener<T> extends GenericCompletionListener<EVCacheOperationFuture<T>> {
}
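/*
 * A minimal sketch (assumption, not part of the original source): because the
 * interface inherits a single onComplete method from GenericCompletionListener,
 * a listener can be supplied as a lambda that fires once the get operation finishes.
 */
class EVCacheGetOperationListenerSketch {
    static EVCacheGetOperationListener<String> loggingListener() {
        // The future passed in has already completed (success, failure or cancellation).
        return future -> System.out.println("get completed: " + future);
    }
}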
| 756 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheMissException.java
|
package com.netflix.evcache;
public class EVCacheMissException extends EVCacheException {
private static final long serialVersionUID = 222337840463312890L;
public EVCacheMissException(String message) {
super(message);
}
public EVCacheMissException(String message, Throwable cause) {
super(message, cause);
}
}
| 757 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheInMemoryCache.java
|
package com.netflix.evcache;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import com.netflix.archaius.api.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.CacheStats;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListenableFutureTask;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.transcoders.Transcoder;
/**
 * An In Memory cache that can be used to hold data for a short duration. This is
 * helpful when the same key is repeatedly requested from EVCache within a short
 * duration. This can be turned on dynamically and can relieve pressure on
 * EVCache Server instances.
*/
public class EVCacheInMemoryCache<T> {
private static final Logger log = LoggerFactory.getLogger(EVCacheInMemoryCache.class);
private final Property<Integer> _cacheDuration; // The key will be cached for this long
    private final Property<Integer> _refreshDuration, _expireAfterAccessDuration;
private final Property<Integer> _cacheSize; // This many items will be cached
private final Property<Integer> _poolSize; // This many threads will be initialized to fetch data from evcache async
private final String appName;
private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Map<String, Gauge> gaugeMap = new ConcurrentHashMap<String, Gauge>();
private LoadingCache<EVCacheKey, Optional<T>> cache;
private ExecutorService pool = null;
private final Transcoder<T> tc;
private final EVCacheImpl impl;
private final Id sizeId;
public EVCacheInMemoryCache(String appName, Transcoder<T> tc, EVCacheImpl impl) {
this.appName = appName;
this.tc = tc;
this.impl = impl;
this._cacheDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.expire.after.write.duration.ms", Integer.class).orElseGet(appName + ".inmemory.cache.duration.ms").orElse(0);
this._cacheDuration.subscribe((i) -> setupCache());
        this._expireAfterAccessDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.expire.after.access.duration.ms", Integer.class).orElse(0);
        this._expireAfterAccessDuration.subscribe((i) -> setupCache());
this._refreshDuration = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.refresh.after.write.duration.ms", Integer.class).orElse(0);
this._refreshDuration.subscribe((i) -> setupCache());
this._cacheSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".inmemory.cache.size", Integer.class).orElse(100);
this._cacheSize.subscribe((i) -> setupCache());
this._poolSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".thread.pool.size", Integer.class).orElse(5);
this._poolSize.subscribe((i) -> initRefreshPool());
final List<Tag> tags = new ArrayList<Tag>(3);
tags.addAll(impl.getTags());
tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, "size"));
this.sizeId = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.IN_MEMORY, tags);
setupCache();
setupMonitoring(appName);
}
private WriteLock writeLock = new ReentrantReadWriteLock().writeLock();
private void initRefreshPool() {
final ExecutorService oldPool = pool;
writeLock.lock();
try {
final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(
"EVCacheInMemoryCache-%d").build();
pool = Executors.newFixedThreadPool(_poolSize.get(), factory);
if(oldPool != null) oldPool.shutdown();
} finally {
writeLock.unlock();
}
}
private void setupCache() {
try {
CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder().recordStats();
if(_cacheSize.get() > 0) {
builder = builder.maximumSize(_cacheSize.get());
}
            if(_expireAfterAccessDuration.get() > 0) {
                builder = builder.expireAfterAccess(_expireAfterAccessDuration.get(), TimeUnit.MILLISECONDS);
} else if(_cacheDuration.get().intValue() > 0) {
builder = builder.expireAfterWrite(_cacheDuration.get(), TimeUnit.MILLISECONDS);
}
if(_refreshDuration.get() > 0) {
builder = builder.refreshAfterWrite(_refreshDuration.get(), TimeUnit.MILLISECONDS);
}
initRefreshPool();
final LoadingCache<EVCacheKey, Optional<T>> newCache = builder.build(
new CacheLoader<EVCacheKey, Optional<T>>() {
public Optional<T> load(EVCacheKey key) throws EVCacheException, DataNotFoundException {
try {
return Optional.fromNullable(impl.doGet(key, tc));
} catch (EVCacheException e) {
log.error("EVCacheException while loading key -> "+ key, e);
throw e;
                        } catch (Exception e) {
                            log.error("Exception while loading key -> "+ key, e);
throw new EVCacheException("key : " + key + " could not be loaded", e);
}
}
@Override
public ListenableFuture<Optional<T>> reload(EVCacheKey key, Optional<T> oldValue) {
ListenableFutureTask<Optional<T>> task = ListenableFutureTask.create(new Callable<Optional<T>>() {
public Optional<T> call() {
try {
final Optional<T> t = load(key);
if(t == null) {
EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-NotFound");
return oldValue;
} else {
EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-Success");
}
return t;
} catch (EVCacheException e) {
log.error("EVCacheException while reloading key -> "+ key, e);
EVCacheMetricsFactory.getInstance().increment("EVCacheInMemoryCache" + "-" + appName + "-Reload-Fail");
return oldValue;
}
}
});
pool.execute(task);
return task;
}
});
if(cache != null) newCache.putAll(cache.asMap());
final Cache<EVCacheKey, Optional<T>> currentCache = this.cache;
this.cache = newCache;
if(currentCache != null) {
currentCache.invalidateAll();
currentCache.cleanUp();
}
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
private CacheStats previousStats = null;
private long getSize() {
final long size = cache.size();
final CacheStats stats = cache.stats();
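        // Guava CacheStats are cumulative, so report deltas since the previous poll.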
if(previousStats != null) {
try {
getCounter("hits").increment(stats.hitCount() - previousStats.hitCount());
getCounter("miss").increment(stats.missCount() - previousStats.missCount());
getCounter("evictions").increment(stats.evictionCount() - previousStats.evictionCount());
getCounter("requests").increment(stats.requestCount() - previousStats.requestCount());
getCounter("loadExceptionCount").increment(stats.loadExceptionCount() - previousStats.loadExceptionCount());
getCounter("loadCount").increment(stats.loadCount() - previousStats.loadCount());
getCounter("loadSuccessCount").increment(stats.loadSuccessCount() - previousStats.loadSuccessCount());
getCounter("totalLoadTime-ms").increment(( stats.totalLoadTime() - previousStats.totalLoadTime())/1000000);
getGauge("hitrate").set(stats.hitRate());
getGauge("loadExceptionRate").set(stats.loadExceptionRate());
getGauge("averageLoadTime-ms").set(stats.averageLoadPenalty()/1000000);
} catch(Exception e) {
log.error("Error while reporting stats", e);
}
}
previousStats = stats;
return size;
}
@SuppressWarnings("deprecation")
private void setupMonitoring(final String appName) {
EVCacheMetricsFactory.getInstance().getRegistry().gauge(sizeId, this, EVCacheInMemoryCache::getSize);
}
private Counter getCounter(String name) {
Counter counter = counterMap.get(name);
if(counter != null) return counter;
final List<Tag> tags = new ArrayList<Tag>(3);
tags.addAll(impl.getTags());
tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, name));
counter = EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.IN_MEMORY, tags);
counterMap.put(name, counter);
return counter;
}
private Gauge getGauge(String name) {
Gauge gauge = gaugeMap.get(name);
if(gauge != null) return gauge;
final List<Tag> tags = new ArrayList<Tag>(3);
tags.addAll(impl.getTags());
tags.add(new BasicTag(EVCacheMetricsFactory.METRIC, name));
final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.IN_MEMORY, tags);
gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
gaugeMap.put(name, gauge);
return gauge;
}
public T get(EVCacheKey key) throws ExecutionException {
if (cache == null) return null;
final Optional<T> val = cache.get(key);
if(!val.isPresent()) return null;
if (log.isDebugEnabled()) log.debug("GET : appName : " + appName + "; Key : " + key + "; val : " + val);
return val.get();
}
public void put(EVCacheKey key, T value) {
if (cache == null) return;
cache.put(key, Optional.fromNullable(value));
if (log.isDebugEnabled()) log.debug("PUT : appName : " + appName + "; Key : " + key + "; val : " + value);
}
    public void delete(EVCacheKey key) {
        if (cache == null) return;
        // The cache is keyed by EVCacheKey; invalidating by the raw String key would never match.
        cache.invalidate(key);
if (log.isDebugEnabled()) log.debug("DEL : appName : " + appName + "; Key : " + key);
}
public Map<EVCacheKey, Optional<T>> getAll() {
if (cache == null) return Collections.<EVCacheKey, Optional<T>>emptyMap();
return cache.asMap();
}
public static final class DataNotFoundException extends EVCacheException {
private static final long serialVersionUID = 1800185311509130263L;
public DataNotFoundException(String message) {
super(message);
}
}
}
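/*
 * Configuration sketch (derived from the property lookups in the constructor;
 * the app name "EVCACHE" below is only an example). The in-memory layer is
 * tuned entirely through fast properties, e.g.
 *
 *   EVCACHE.inmemory.expire.after.write.duration.ms=1000  (0 disables expire-after-write)
 *   EVCACHE.inmemory.expire.after.access.duration.ms=0    (takes precedence when > 0)
 *   EVCACHE.inmemory.refresh.after.write.duration.ms=500  (async reload via the refresh pool)
 *   EVCACHE.inmemory.cache.size=100                       (max entries; 0 leaves the cache unbounded)
 *   EVCACHE.thread.pool.size=5                            (threads in the refresh pool)
 *
 * Changing any of these at runtime rebuilds the underlying Guava cache via setupCache().
 */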
| 758 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheInternal.java
|
package com.netflix.evcache;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.transcoders.Transcoder;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
public interface EVCacheInternal extends EVCache {
EVCacheItem<CachedData> metaGet(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException;
Map<MemcachedNode, CachedValues> metaGetPerClient(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException;
EVCacheItemMetaData metaDebug(String key, boolean isOriginalKeyHashed) throws EVCacheException;
Map<MemcachedNode, EVCacheItemMetaData> metaDebugPerClient(String key, boolean isOriginalKeyHashed) throws EVCacheException;
Future<Boolean>[] delete(String key, boolean isOriginalKeyHashed) throws EVCacheException;
EVCacheLatch addOrSetToWriteOnly(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException;
EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups) throws EVCacheException;
EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroup) throws EVCacheException;
EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName, List<String> destinationIps) throws EVCacheException;
KeyHashedState isKeyHashed(String appName, String serverGroup);
public enum KeyHashedState {
YES,
NO,
MAYBE
}
public static class CachedValues {
private final String key;
private final CachedData data;
private EVCacheItemMetaData itemMetaData;
public CachedValues(String key, CachedData data, EVCacheItemMetaData itemMetaData) {
this.key = key;
this.data = data;
this.itemMetaData = itemMetaData;
}
public String getKey() {
return key;
}
public CachedData getData() {
return data;
}
public EVCacheItemMetaData getEVCacheItemMetaData() {
return itemMetaData;
}
}
public class Builder extends EVCache.Builder {
public Builder() {
super();
}
@Override
protected EVCache newImpl(String appName, String cachePrefix, int ttl, Transcoder<?> transcoder, boolean serverGroupRetry, boolean enableExceptionThrowing, EVCacheClientPoolManager poolManager) {
return new EVCacheInternalImpl(appName, cachePrefix, ttl, transcoder, serverGroupRetry, enableExceptionThrowing, poolManager);
}
}
}
| 759 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheTranscoder.java
|
package com.netflix.evcache;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.CachedData;
public class EVCacheTranscoder extends EVCacheSerializingTranscoder {
public EVCacheTranscoder() {
this(EVCacheConfig.getInstance().getPropertyRepository().get("default.evcache.max.data.size", Integer.class).orElse(20 * 1024 * 1024).get());
}
public EVCacheTranscoder(int max) {
this(max, EVCacheConfig.getInstance().getPropertyRepository().get("default.evcache.compression.threshold", Integer.class).orElse(120).get());
}
public EVCacheTranscoder(int max, int compressionThreshold) {
super(max);
setCompressionThreshold(compressionThreshold);
}
@Override
public boolean asyncDecode(CachedData d) {
return super.asyncDecode(d);
}
@Override
public Object decode(CachedData d) {
return super.decode(d);
}
@Override
public CachedData encode(Object o) {
        if (o instanceof CachedData) return (CachedData) o;
return super.encode(o);
}
}
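/*
 * Usage sketch (assumption, not part of the original source): the no-arg
 * constructor picks up the property-driven defaults above, while explicit
 * sizes can be supplied, e.g. for tests.
 */
class EVCacheTranscoderSketch {
    static CachedData encodeExample() {
        // 1 MB max payload, compress anything above 512 bytes.
        final EVCacheTranscoder tc = new EVCacheTranscoder(1024 * 1024, 512);
        return tc.encode("some value");
    }
}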
| 760 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheInternalImpl.java
|
package com.netflix.evcache;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheValue;
import com.netflix.evcache.pool.ServerGroup;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.transcoders.Transcoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
/**
 * This class is for internal use only by EVCache components and is not recommended for any other purpose. EVCache and EVCacheImpl are recommended instead.
*/
class EVCacheInternalImpl extends EVCacheImpl implements EVCacheInternal {
private static final Logger log = LoggerFactory.getLogger(EVCacheInternalImpl.class);
public EVCacheItem<CachedData> metaGet(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException {
return this.metaGetInternal(key, tc, isOriginalKeyHashed);
}
public Map<MemcachedNode, CachedValues> metaGetPerClient(String key, Transcoder<CachedData> tc, boolean isOriginalKeyHashed) throws EVCacheException {
Map<MemcachedNode, CachedValues> map = new HashMap<>();
final Map<ServerGroup, List<EVCacheClient>> instancesByZone = _pool.getAllInstancesByZone();
final Map<ServerGroup, EVCacheClient> instancesWithNull = new HashMap<ServerGroup, EVCacheClient>();
final EVCacheKey evcKey = getEVCacheKey(key);
for (ServerGroup sGroup : instancesByZone.keySet()) {
try {
for (EVCacheClient client : instancesByZone.get(sGroup)) {
EVCacheItem<CachedData> item = getEVCacheItem(client, evcKey, tc, true, false, isOriginalKeyHashed, false);
if (log.isDebugEnabled()) log.debug("client : " + client + "; item : " + item);
if(item == null) {
instancesWithNull.put(sGroup, client);
} else {
                        map.put(client.getNodeLocator().getPrimary(key), new CachedValues(key, item.getData(), item.getItemMetaData()));
}
}
} catch (Exception e) {
log.error("Error getting meta data", e);
}
}
if (log.isDebugEnabled()) log.debug("map : " + map);
if (log.isDebugEnabled()) log.debug("instancesWithNull : " + instancesWithNull);
if(instancesWithNull.size() > 0 && map.size() > 0) {
final EVCacheTranscoder transcoder = new EVCacheTranscoder();
String originalKey = null;
for(CachedValues vals : map.values()) {
if (log.isDebugEnabled()) log.debug("vals : " + vals);
try {
Object obj = transcoder.decode(vals.getData());
if (log.isDebugEnabled()) log.debug("Obj : " + obj);
if(obj instanceof EVCacheValue) {
originalKey = ((EVCacheValue)obj).getKey();
if (log.isDebugEnabled()) log.debug("original key: " + originalKey);
break;
}
} catch(Exception e) {
log.error("Exception decoding", e);
}
}
if(originalKey != null) {
for(ServerGroup sGroup : instancesWithNull.keySet()) {
if (log.isDebugEnabled()) log.debug("sGroup : " + sGroup);
final EVCacheClient client = instancesWithNull.get(sGroup);
if (log.isDebugEnabled()) log.debug("Client : " + client);
EVCacheItem<CachedData> item;
try {
item = getEVCacheItem(client, getEVCacheKey(originalKey), tc, true, false, false, false);
if (log.isDebugEnabled()) log.debug("item : " + item);
map.put(client.getNodeLocator().getPrimary(originalKey), null == item ? null : new CachedValues(key, item.getData(), item.getItemMetaData()));
} catch (Exception e) {
log.error("Exception getting meta data using original key - " + originalKey, e);
}
}
}
} else if(map.size() == 0) {
for (ServerGroup sGroup : instancesByZone.keySet()) {
try {
for (EVCacheClient client : instancesByZone.get(sGroup)) {
map.put(client.getNodeLocator().getPrimary(key), null);
}
} catch (Exception e) {
log.error("Error getting meta data", e);
}
}
}
if (log.isDebugEnabled()) log.debug("return map : " + map);
return map;
}
public EVCacheItemMetaData metaDebug(String key, boolean isOriginalKeyHashed) throws EVCacheException {
return this.metaDebugInternal(key, isOriginalKeyHashed);
}
public Map<MemcachedNode, EVCacheItemMetaData> metaDebugPerClient(String key, boolean isOriginalKeyHashed) throws EVCacheException {
Map<MemcachedNode, EVCacheItemMetaData> map = new HashMap<>();
final Map<ServerGroup, List<EVCacheClient>> instancesByZone = _pool.getAllInstancesByZone();
final EVCacheKey evcKey = getEVCacheKey(key);
for (ServerGroup sGroup : instancesByZone.keySet()) {
try {
for (EVCacheClient client : instancesByZone.get(sGroup)) {
EVCacheItemMetaData itemMetaData = getEVCacheItemMetaData(client, evcKey, true, false, isOriginalKeyHashed);
map.put(client.getNodeLocator().getPrimary(key), itemMetaData);
}
} catch (Exception e) {
log.error("Error getting meta data", e);
}
}
return map;
}
public Future<Boolean>[] delete(String key, boolean isOriginalKeyHashed) throws EVCacheException {
return this.deleteInternal(key, isOriginalKeyHashed);
}
public EVCacheInternalImpl(String appName, String cacheName, int timeToLive, Transcoder<?> transcoder, boolean enableZoneFallback,
boolean throwException, EVCacheClientPoolManager poolManager) {
super(appName, cacheName, timeToLive, transcoder, enableZoneFallback, throwException, poolManager);
}
public EVCacheLatch addOrSetToWriteOnly(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException {
EVCacheClient[] clients = _pool.getWriteOnlyEVCacheClients();
if (replaceItem)
return set(key, value, null, timeToLive, policy, clients, 0);
else
return add(key, value, null, timeToLive, policy, clients, 0, false);
}
public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups) throws EVCacheException {
return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroups, null);
}
public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName) throws EVCacheException {
return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroupName, null);
}
public EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, String serverGroupName, List<String> destinationIps) throws EVCacheException {
List<String> serverGroups = new ArrayList<>();
serverGroups.add(serverGroupName);
return addOrSet(replaceItem, key, value, timeToLive, policy, serverGroups, destinationIps);
}
private EVCacheLatch addOrSet(boolean replaceItem, String key, CachedData value, int timeToLive, EVCacheLatch.Policy policy, List<String> serverGroups, List<String> destinationIps) throws EVCacheException {
Map<ServerGroup, List<EVCacheClient>> clientsByServerGroup = _pool.getAllInstancesByZone();
List<EVCacheClient> evCacheClients = clientsByServerGroup.entrySet().stream()
.filter(entry -> serverGroups.contains(entry.getKey().getName()))
.map(Map.Entry::getValue)
.flatMap(List::stream)
.collect(Collectors.toList());
if (null != destinationIps && !destinationIps.isEmpty()) {
// identify that evcache client whose primary node is the destination ip for the key being processed
evCacheClients = evCacheClients.stream().filter(client ->
destinationIps.contains(((InetSocketAddress) client.getNodeLocator()
.getPrimary(getEVCacheKey(key).getDerivedKey(client.isDuetClient(), client.getHashingAlgorithm(), client.shouldEncodeHashKey(), client.getMaxDigestBytes(), client.getMaxHashLength(), client.getBaseEncoder()))
.getSocketAddress()).getAddress().getHostAddress())
).collect(Collectors.toList());
}
EVCacheClient[] evCacheClientsArray = new EVCacheClient[evCacheClients.size()];
evCacheClients.toArray(evCacheClientsArray);
if (replaceItem) {
return this.set(key, value, null, timeToLive, policy, evCacheClientsArray, evCacheClientsArray.length);
}
else {
// given that we do not want to replace items, we should explicitly set fixup to false, otherwise "add" can
// result in "set" during fixup which can result in replacing items
return this.add(key, value, null, timeToLive, policy, evCacheClientsArray, evCacheClientsArray.length, false);
}
}
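    // Key hashing state: YES when hashing is pinned on at the server-group or app
    // level, MAYBE when per-key auto-hashing may kick in, NO otherwise.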
public KeyHashedState isKeyHashed(String appName, String serverGroup) {
PropertyRepository propertyRepository = _poolManager.getEVCacheConfig().getPropertyRepository();
boolean isKeyHashedAtAppOrAsg = propertyRepository.get(serverGroup + ".hash.key", Boolean.class).orElseGet(appName + ".hash.key").orElse(false).get();
if (isKeyHashedAtAppOrAsg) {
return KeyHashedState.YES;
}
if (propertyRepository.get(appName + ".auto.hash.keys", Boolean.class).orElseGet("evcache.auto.hash.keys").orElse(false).get()) {
return KeyHashedState.MAYBE;
}
return KeyHashedState.NO;
}
}
| 761 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheImplMBean.java
|
package com.netflix.evcache;
public interface EVCacheImplMBean extends EVCache {
}
| 762 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheSerializingTranscoder.java
|
/**
* Copyright (C) 2006-2009 Dustin Sallings
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
* IN THE SOFTWARE.
*/
package com.netflix.evcache;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.BaseSerializingTranscoder;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.transcoders.TranscoderUtils;
import net.spy.memcached.util.StringUtils;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* Transcoder that serializes and compresses objects.
*/
public class EVCacheSerializingTranscoder extends BaseSerializingTranscoder implements
Transcoder<Object> {
// General flags
static final int SERIALIZED = 1;
static final int COMPRESSED = 2;
// Special flags for specially handled types.
private static final int SPECIAL_MASK = 0xff00;
static final int SPECIAL_BOOLEAN = (1 << 8);
static final int SPECIAL_INT = (2 << 8);
static final int SPECIAL_LONG = (3 << 8);
static final int SPECIAL_DATE = (4 << 8);
static final int SPECIAL_BYTE = (5 << 8);
static final int SPECIAL_FLOAT = (6 << 8);
static final int SPECIAL_DOUBLE = (7 << 8);
static final int SPECIAL_BYTEARRAY = (8 << 8);
static final String COMPRESSION = "COMPRESSION_METRIC";
private final TranscoderUtils tu = new TranscoderUtils(true);
private Timer timer;
/**
* Get a serializing transcoder with the default max data size.
*/
public EVCacheSerializingTranscoder() {
this(CachedData.MAX_SIZE);
}
/**
* Get a serializing transcoder that specifies the max data size.
*/
public EVCacheSerializingTranscoder(int max) {
super(max);
}
@Override
public boolean asyncDecode(CachedData d) {
if ((d.getFlags() & COMPRESSED) != 0 || (d.getFlags() & SERIALIZED) != 0) {
return true;
}
return super.asyncDecode(d);
}
/*
* (non-Javadoc)
*
* @see net.spy.memcached.Transcoder#decode(net.spy.memcached.CachedData)
*/
public Object decode(CachedData d) {
byte[] data = d.getData();
Object rv = null;
if ((d.getFlags() & COMPRESSED) != 0) {
data = decompress(d.getData());
}
int flags = d.getFlags() & SPECIAL_MASK;
if ((d.getFlags() & SERIALIZED) != 0 && data != null) {
rv = deserialize(data);
} else if (flags != 0 && data != null) {
switch (flags) {
case SPECIAL_BOOLEAN:
rv = Boolean.valueOf(tu.decodeBoolean(data));
break;
case SPECIAL_INT:
rv = Integer.valueOf(tu.decodeInt(data));
break;
case SPECIAL_LONG:
rv = Long.valueOf(tu.decodeLong(data));
break;
case SPECIAL_DATE:
rv = new Date(tu.decodeLong(data));
break;
case SPECIAL_BYTE:
rv = Byte.valueOf(tu.decodeByte(data));
break;
      case SPECIAL_FLOAT:
        rv = Float.valueOf(Float.intBitsToFloat(tu.decodeInt(data)));
        break;
      case SPECIAL_DOUBLE:
        rv = Double.valueOf(Double.longBitsToDouble(tu.decodeLong(data)));
        break;
case SPECIAL_BYTEARRAY:
rv = data;
break;
default:
getLogger().warn("Undecodeable with flags %x", flags);
}
} else {
rv = decodeString(data);
}
return rv;
}
/*
* (non-Javadoc)
*
* @see net.spy.memcached.Transcoder#encode(java.lang.Object)
*/
public CachedData encode(Object o) {
byte[] b = null;
int flags = 0;
if (o instanceof String) {
b = encodeString((String) o);
if (StringUtils.isJsonObject((String) o)) {
return new CachedData(flags, b, getMaxSize());
}
} else if (o instanceof Long) {
b = tu.encodeLong((Long) o);
flags |= SPECIAL_LONG;
} else if (o instanceof Integer) {
b = tu.encodeInt((Integer) o);
flags |= SPECIAL_INT;
} else if (o instanceof Boolean) {
b = tu.encodeBoolean((Boolean) o);
flags |= SPECIAL_BOOLEAN;
} else if (o instanceof Date) {
b = tu.encodeLong(((Date) o).getTime());
flags |= SPECIAL_DATE;
} else if (o instanceof Byte) {
b = tu.encodeByte((Byte) o);
flags |= SPECIAL_BYTE;
} else if (o instanceof Float) {
b = tu.encodeInt(Float.floatToRawIntBits((Float) o));
flags |= SPECIAL_FLOAT;
} else if (o instanceof Double) {
b = tu.encodeLong(Double.doubleToRawLongBits((Double) o));
flags |= SPECIAL_DOUBLE;
} else if (o instanceof byte[]) {
b = (byte[]) o;
flags |= SPECIAL_BYTEARRAY;
} else {
b = serialize(o);
flags |= SERIALIZED;
}
assert b != null;
    if (b.length > compressionThreshold) {
      final int uncompressedLength = b.length;
      byte[] compressed = compress(b);
      if (compressed.length < b.length) {
        getLogger().debug("Compressed %s from %d to %d",
            o.getClass().getName(), b.length, compressed.length);
        b = compressed;
        flags |= COMPRESSED;
      } else {
        getLogger().info("Compression increased the size of %s from %d to %d",
            o.getClass().getName(), b.length, compressed.length);
      }
      // Compute the ratio against the original size; b may already point at the
      // compressed bytes here, which would otherwise always yield 100%.
      final long compressionRatio = Math.round((double) compressed.length / uncompressedLength * 100);
      updateTimerWithCompressionRatio(compressionRatio);
    }
return new CachedData(flags, b, getMaxSize());
}
  private void updateTimerWithCompressionRatio(long ratioPercentage) {
    if (timer == null) {
      final List<Tag> tagList = new ArrayList<Tag>(1);
      tagList.add(new BasicTag(EVCacheMetricsFactory.COMPRESSION_TYPE, "gzip"));
      timer = EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.COMPRESSION_RATIO, tagList, Duration.ofMillis(100));
    }
    // The ratio percentage is recorded as milliseconds so the percentile timer
    // can track its distribution.
    timer.record(ratioPercentage, TimeUnit.MILLISECONDS);
}
}
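/*
 * Round-trip sketch (not part of the original source): primitives are encoded
 * with the SPECIAL_* flags above, other objects are Java-serialized, and any
 * payload above the compression threshold is gzip-compressed.
 */
class EVCacheSerializingTranscoderSketch {
    static Object roundTrip() {
        final EVCacheSerializingTranscoder tc = new EVCacheSerializingTranscoder();
        tc.setCompressionThreshold(128); // compress payloads larger than 128 bytes
        final CachedData d = tc.encode(Long.valueOf(42L)); // stored with the SPECIAL_LONG flag
        return tc.decode(d); // yields Long.valueOf(42L)
    }
}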
| 763 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCache.java
|
package com.netflix.evcache;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.evcache.EVCacheLatch.Policy;
import com.netflix.evcache.operation.EVCacheItem;
import com.netflix.evcache.operation.EVCacheItemMetaData;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import net.spy.memcached.transcoders.Transcoder;
import rx.Scheduler;
import rx.Single;
/**
* An abstract interface for interacting with an Ephemeral Volatile Cache.
*
* <h3>Example</h3>
* <p>
* To create an instance of EVCache with AppName="EVCACHE", cachePrefix="Test"
* and DefaultTTL="3600"
*
* <b>Dependency Injection (Guice) Approach</b> <blockquote>
*
* <pre>
* {@literal @}Inject
* public MyClass(EVCache.Builder builder,....) {
* EVCache myCache = builder.setAppName("EVCACHE").setCachePrefix("Test").setDefaultTTL(3600).build();
* }
* </pre>
*
* </blockquote>
*
* Below is an example to set value="John Doe" for key="name" <blockquote>
*
* <pre>
* myCache.set("name", "John Doe");
* </pre>
*
* </blockquote>
*
*
* To read the value for key="name" <blockquote>
*
* <pre>
* String value = myCache.get("name");
* </pre>
*
* </blockquote>
*
* </p>
*
* @author smadappa
*/
public interface EVCache {
    // TODO: Remove Async methods (Project rx) and rename COMPLETABLE_* to ASYNC_*
public static enum Call {
GET, GETL, GET_AND_TOUCH, ASYNC_GET, BULK, SET, DELETE, INCR, DECR, TOUCH, APPEND, PREPEND, REPLACE, ADD, APPEND_OR_ADD, GET_ALL, META_GET, META_SET, META_DEBUG,
COMPLETABLE_FUTURE_GET, COMPLETABLE_FUTURE_GET_BULK
};
/**
* Set an object in the EVCACHE (using the default Transcoder) regardless of
* any existing value.
*
* The <code>timeToLive</code> value passed to memcached is as specified in
* the defaultTTL value for this cache
*
* @param key
* the key under which this object should be added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
* @return Array of futures representing the processing of this operation
* across all replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues with Serializing the value or any
* IO Related issues
*/
<T> Future<Boolean>[] set(String key, T value) throws EVCacheException;
/**
* Set an object in the EVCACHE (using the default Transcoder) regardless of
* any existing value.
*
* The <code>timeToLive</code> value is passed to memcached exactly as
* given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch
* time (number of seconds since January 1, 1970, as a 32-bit int value), or
* a number of seconds starting from current time. In the latter case, this
* number of seconds may not exceed 60*60*24*30 (number of seconds in 30
* days); if the number sent by a client is larger than that, the server
* will consider it to be real Unix time value rather than an offset from
* current time. </blockquote>
*
* @param key
* the key under which this object should be added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] set(String key, T value, int timeToLive) throws EVCacheException;
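    /*
     * TTL sketch (illustrative values only): a relative TTL under 30 days is
     * interpreted as seconds from now, anything larger as an absolute epoch time.
     *
     *   myCache.set("name", "John Doe", 3600);        // expires one hour from now
     *   myCache.set("name", "John Doe", 1893456000);  // > 30 days in seconds, so treated
     *                                                 // as an absolute Unix timestamp (2030-01-01)
     */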
/**
* Set an object in the EVCACHE using the given Transcoder regardless of any
* existing value.
*
     * The <code>timeToLive</code> value passed to memcached is as specified in
     * the defaultTTL value for this cache.
*
* @param key
* the key under which this object should be added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] set(String key, T value, Transcoder<T> tc) throws EVCacheException;
/**
* Set an object in the EVCACHE using the given Transcoder regardless of any existing value using the default TTL and Transcoder.
*
* The <code>timeToLive</code> value is passed to memcached exactly as given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch time (number of seconds since January 1, 1970, as a 32-bit int value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be a real Unix time value rather than an offset from current time. </blockquote>
*
* @param key
* the key under which this object should be added.
* Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
* @param policy
* The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed.
* @return Array of futures representing the processing of this operation across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept any more requests or issues Serializing the value or any IO Related issues
*/
<T> EVCacheLatch set(String key, T value, EVCacheLatch.Policy policy) throws EVCacheException;
/**
* Set an object in the EVCACHE using the given Transcoder regardless of any existing value with the given TTL.
*
* The <code>timeToLive</code> value is passed to memcached exactly as given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch time (number of seconds since January 1, 1970, as a 32-bit int value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be a real Unix time value rather than an offset from current time. </blockquote>
*
* @param key
* the key under which this object should be added. Ensure the key is properly encoded and does not contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
* @param timeToLive
* the expiration of this object i.e. less than 30 days in seconds or the exact expiry time as UNIX time
* @param policy
* The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed.
* @return Array of futures representing the processing of this operation across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept any more requests or issues Serializing the value or any IO Related issues
*/
<T> EVCacheLatch set(String key, T value, int timeToLive, EVCacheLatch.Policy policy) throws EVCacheException;
/**
* Set an object in the EVCACHE using the given Transcoder regardless of any existing value using the given Transcoder.
*
* The <code>timeToLive</code> value is passed to memcached exactly as given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch time (number of seconds since January 1, 1970, as a 32-bit int value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be a real Unix time value rather than an offset from current time. </blockquote>
*
* @param key
* the key under which this object should be added. Ensure the key is properly encoded and does not contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
* @param tc
* the Transcoder to serialize the data
* @param policy
* The Latch will be returned based on the Policy. The Latch can then be used to await until the count down has reached to 0 or the specified time has elapsed.
* @return Array of futures representing the processing of this operation across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept any more requests or issues Serializing the value or any IO Related issues
*/
<T> EVCacheLatch set(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException;
/**
* Set an object in the EVCACHE using the given Transcoder regardless of any
* existing value.
*
* The <code>timeToLive</code> value is passed to memcached exactly as
* given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch time
* (number of seconds since January 1, 1970, as a 32-bit int value), or a
* number of seconds starting from current time. In the latter case, this
* number of seconds may not exceed 60*60*24*30 (number of seconds in 30
* days); if the number sent by a client is larger than that, the server
* will consider it to be real Unix time value rather than an offset from
* current time. </blockquote>
*
* @param key
* the key under which this object should be added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
* @param tc
* the Transcoder to serialize the data
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @param policy
* The Latch will be returned based on the Policy. The Latch can
* then be used to await until the count down has reached to 0 or
* the specified time has elapsed.
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> EVCacheLatch set(String key, T value, Transcoder<T> tc, int timeToLive, EVCacheLatch.Policy policy)
throws EVCacheException;
/**
     * Replace an existing object in the EVCACHE using the default Transcoder and
* default TTL. If the object does not exist in EVCACHE then the value is
* not replaced.
*
* @param key
* the key under which this object should be replaced. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to replace
* @param policy
* The Latch will be returned based on the Policy. The Latch can
* then be used to await until the count down has reached to 0 or
* the specified time has elapsed.
*
     * @return EVCacheLatch which encompasses the Operation. You can block
* on the Operation based on the policy to ensure the required
* criteria is met. The Latch can also be queried to get details on
* status of the operations
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> EVCacheLatch replace(String key, T value, EVCacheLatch.Policy policy) throws EVCacheException;
/**
     * Replace an existing object in the EVCACHE using the given Transcoder and
* default TTL. If the object does not exist in EVCACHE then the value is
* not replaced.
*
* @param key
* the key under which this object should be replaced. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to replace
     * @param tc
     *            the Transcoder to serialize the data
* @param policy
* The Latch will be returned based on the Policy. The Latch can
* then be used to await until the count down has reached to 0 or
* the specified time has elapsed.
*
     * @return EVCacheLatch which encompasses the Operation. You can block
* on the Operation based on the policy to ensure the required
* criteria is met. The Latch can also be queried to get details on
* status of the operations
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, EVCacheLatch.Policy policy) throws EVCacheException;
/**
* Replace an existing object in the EVCACHE using the given Transcoder. If
* the object does not exist in EVCACHE then the value is not replaced.
*
* The <code>timeToLive</code> value is passed to memcached exactly as
* given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch time
* (number of seconds since January 1, 1970, as a 32-bit int value), or a
* number of seconds starting from current time. In the latter case, this
* number of seconds may not exceed 60*60*24*30 (number of seconds in 30
* days); if the number sent by a client is larger than that, the server
* will consider it to be real Unix time value rather than an offset from
* current time. </blockquote>
*
* @param key
* the key under which this object should be replaced. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to replace
* @param tc
* the Transcoder to serialize the data
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @param policy
* The Latch will be returned based on the Policy. The Latch can
* then be used to await until the count down has reached to 0 or
* the specified time has elapsed.
*
     * @return EVCacheLatch which encompasses the Operation. You can block
* on the Operation based on the policy to ensure the required
* criteria is met. The Latch can also be queried to get details on
* status of the operations
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> EVCacheLatch replace(String key, T value, Transcoder<T> tc, int timeToLive, EVCacheLatch.Policy policy)
throws EVCacheException;
/**
     * Set an object in the EVCACHE using the given {@link Transcoder} regardless of any
* existing value.
*
* The <code>timeToLive</code> value is passed to memcached exactly as
* given, and will be processed per the memcached protocol specification:
*
     * <blockquote> The actual value sent may either be Unix time a.k.a. epoch time
* (number of seconds since January 1, 1970, as a 32-bit int value), or a
* number of seconds starting from current time. In the latter case, this
* number of seconds may not exceed 60*60*24*30 (number of seconds in 30
* days); if the number sent by a client is larger than that, the server
* will consider it to be real Unix time value rather than an offset from
* current time. </blockquote>
*
* @param key
* the key under which this object should be added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
* is 250 characters.
     * @param value
     *            the object to store
     * @param tc
     *            the Transcoder to serialize the data
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] set(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
     * Remove a current key-value relation from the Cache.
*
* @param key
* the non-null key corresponding to the relation to be removed.
* Ensure the key is properly encoded and does not contain
* whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @return Array of futures representing the processing of this operation
     *         across all the replicas. If the future returns true then the key
     *         was deleted from the Cache; if false then the key was not found and
     *         thus not deleted. Note: In effect the desired outcome was achieved
     *         either way.
     *         Note: If null is returned then the operation timed out and the key
     *         was probably not deleted. In such a scenario, retry the operation.
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or any IO Related issues
*/
Future<Boolean>[] delete(String key) throws EVCacheException;
/**
     * Remove a current key-value relation from the Cache.
*
* @param key
* the non-null key corresponding to the relation to be removed.
* Ensure the key is properly encoded and does not contain
* whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @param policy
* The Latch will be returned based on the Policy. The Latch can
* then be used to await until the count down has reached to 0 or
* the specified time has elapsed.
*
     * @return EVCacheLatch which encompasses the Operation. You can block
* on the Operation based on the policy to ensure the required
* criteria is met. The Latch can also be queried to get details on
* status of the operations
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or any IO Related issues
*/
<T> EVCacheLatch delete(String key, EVCacheLatch.Policy policy) throws EVCacheException;
/**
* Retrieve the value for the given key.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @return the Value for the given key from the cache (null if there is
* none).
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*
* Note: If the data is replicated by zone, then we can get the
* value from the zone local to the client. If we cannot find
* this value then null is returned. This is transparent to the
* users.
*/
<T> T get(String key) throws EVCacheException;
/**
     * Asynchronously retrieve the value for the given key.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @return the Value for the given key from the cache (null if there is
* none).
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*
* Note: If the data is replicated by zone, then we can get the
* value from the zone local to the client. If we cannot find
* this value then null is returned. This is transparent to the
* users.
*/
<T> CompletableFuture<T> getAsync(String key) throws EVCacheException;
/**
* Retrieve the value for the given key.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @param scheduler
* the {@link Scheduler} to perform subscription actions on
* @return the Value for the given key from the cache (null if there is
* none).
*/
<T> Single<T> get(String key, Scheduler scheduler);
/**
     * Retrieve the value for the given key using the specified Transcoder for
* deserialization.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @param tc
* the Transcoder to deserialize the data
* @return the Value for the given key from the cache (null if there is
* none).
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*
* Note: If the data is replicated by zone, then we can get the
* value from the zone local to the client. If we cannot find
* this value then null is returned. This is transparent to the
* users.
*/
<T> T get(String key, Transcoder<T> tc) throws EVCacheException;
/**
     * Asynchronously retrieve the value for the given key using the specified Transcoder for
* deserialization.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @param tc
* the Transcoder to deserialize the data
     * @return the CompletableFuture of the value for the given key from the cache (null if there is
* none).
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*
* Note: If the data is replicated by zone, then we can get the
* value from the zone local to the client. If we cannot find
* this value then null is returned. This is transparent to the
* users.
*/
<T> CompletableFuture<T> getAsync(String key, Transcoder<T> tc) throws EVCacheException;
/**
     * Retrieve the metadata for the given key
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @return the metadata for the given key from the cache (null if there is
* none).
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
     *             any more requests or any IO related issues
*
* Note: If the data is replicated by zone, then we get the metadata
* from the zone local to the client. If we cannot find
     *         the value then we try other zones. If all are unsuccessful then null is returned.
*/
default EVCacheItemMetaData metaDebug(String key) throws EVCacheException {
throw new EVCacheException("Default implementation. If you are implementing EVCache interface you need to implement this method.");
}
/**
* Retrieve the value & its metadata for the given a key using the specified Transcoder for
* deserialization.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @param tc
* the Transcoder to deserialize the data
* @return the Value for the given key from the cache (null if there is
* none) and its metadata all encapsulated in EVCacheItem.
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*
* Note: If the data is replicated by zone, then we can get the
* value from the zone local to the client. If we cannot find
* this value we retry other zones, if still not found, then null is returned.
*/
default <T> EVCacheItem<T> metaGet(String key, Transcoder<T> tc) throws EVCacheException {
throw new EVCacheException("Default implementation. If you are implementing EVCache interface you need to implement this method.");
}
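    // Usage sketch (hypothetical key and a transcoder "tc"; EVCacheItem, defined
    // elsewhere in this module, bundles the value with its metadata):
    //   EVCacheItem<String> item = evCache.metaGet("user_123", tc);
    //   if (item != null) { String value = item.getData(); }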
/**
     * Retrieve the value for the given key using the specified Transcoder for
* deserialization.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
* is 250 characters.
* @param tc
* the Transcoder to deserialize the data
* @param policy
     *            The Latch will be returned based on the Policy. The Latch can then be used to await until the countdown has reached 0 or the specified time has elapsed.
*
* @return the Value for the given key from the cache (null if there is
* none).
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*
* Note: If the data is replicated by zone, then we can get the
* value from the zone local to the client. If we cannot find
* this value then null is returned. This is transparent to the
* users.
*/
<T> T get(String key, Transcoder<T> tc, Policy policy) throws EVCacheException;
/**
     * Retrieve the value for the given key using the specified Transcoder for
* deserialization.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param tc
* the Transcoder to deserialize the data
* @param scheduler
* the {@link Scheduler} to perform subscription actions on
* @return the Value for the given key from the cache (null if there is
* none).
*/
<T> Single<T> get(String key, Transcoder<T> tc, Scheduler scheduler);
/**
     * Retrieve the value for the given key using the default Transcoder for
* deserialization and reset its expiration using the passed timeToLive.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param timeToLive
* the new expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @param scheduler
* the {@link Scheduler} to perform subscription actions on
* @return the Value for the given key from the cache (null if there is
* none).
*/
<T> Single<T> getAndTouch(String key, int timeToLive, Scheduler scheduler);
/**
     * Retrieve the value for the given key using the default Transcoder for
* deserialization and reset its expiration using the passed timeToLive.
*
* @param key
* key to get. Ensure the key is properly encoded and does not
* contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param timeToLive
* the new expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @param tc
* the Transcoder to deserialize the data
* @param scheduler
* the {@link Scheduler} to perform subscription actions on
* @return the Value for the given key from the cache (null if there is
* none).
*/
<T> Single<T> getAndTouch(String key, int timeToLive, Transcoder<T> tc, Scheduler scheduler);
/**
* Get with a single key and reset its expiration.
*
* @param key
* the key to get. Ensure the key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param timeToLive
* the new expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @return the result from the cache (null if there is none)
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> T getAndTouch(String key, int timeToLive) throws EVCacheException;
/**
* Get with a single key and reset its expiration.
*
* @param key
* the key to get. Ensure the key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param timeToLive
* the new expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @param tc
* the Transcoder to deserialize the data
* @return the result from the cache (null if there is none)
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> T getAndTouch(String key, int timeToLive, Transcoder<T> tc) throws EVCacheException;
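    // Usage sketch (hypothetical key; uses the two-argument overload above to read a
    // session entry and extend its TTL to 30 minutes in one call):
    //   String session = evCache.getAndTouch("session_123", 1800);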
/**
* Retrieve the value of a set of keys.
*
* @param keys
* the keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @return a map of the values (for each value that exists). If the returned
     *         map contains the key but the value is null then the key does not
     *         exist in the cache. If a key is missing then we were not able to
     *         retrieve the data for that key due to some exception
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> Map<String, T> getBulk(String... keys) throws EVCacheException;
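    // Usage sketch (hypothetical keys; per the contract above, a key absent from the
    // returned map could not be fetched, while a null value means a cache miss):
    //   Map<String, String> values = evCache.getBulk("user_1", "user_2", "user_3");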
/**
     * Asynchronously retrieve the values of a set of keys.
*
* @param keys
* the keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @return a map of the values (for each value that exists). If the returned
     *         map contains the key but the value is null then the key does not
     *         exist in the cache. If a key is missing then we were not able to
     *         retrieve the data for that key due to some exception
*/
<T> CompletableFuture<Map<String, T>> getAsyncBulk(String... keys);
/**
* Retrieve the value for a set of keys, using a specified Transcoder for
* deserialization.
*
* @param keys
     *            keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param tc
* the transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the returned
     *         map contains the key but the value is null then the key does not
     *         exist in the cache. If a key is missing then we were not able to
     *         retrieve the data for that key due to some exception
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) throws EVCacheException;
/**
     * Asynchronously retrieve the values for a set of keys, using a specified Transcoder for
     * deserialization. In beta testing (to be used by the gateway team).
*
* @param keys
     *            keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param tc
* the transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the returned
     *         map contains the key but the value is null then the key does not
     *         exist in the cache. If a key is missing then we were not able to
     *         retrieve the data for that key due to some exception
*/
<T> CompletableFuture<Map<String, T>> getAsyncBulk(Collection<String> keys, Transcoder<T> tc);
/**
* Retrieve the value for the collection of keys, using the default
* Transcoder for deserialization.
*
* @param keys
* The collection of keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @return a map of the values (for each value that exists). If the returned
     *         map contains the key but the value is null then the key does not
     *         exist in the cache. If a key is missing then we were not able to
     *         retrieve the data for that key due to some exception
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> Map<String, T> getBulk(Collection<String> keys) throws EVCacheException;
/**
* Retrieve the value for the collection of keys, using the specified
* Transcoder for deserialization.
*
* @param keys
* The collection of keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param tc
* the transcoder to use for deserialization
     * @return a map of the values (for each value that exists). If the returned
     *         map contains the key but the value is null then the key does not
     *         exist in the cache. If a key is missing then we were not able to
     *         retrieve the data for that key due to some exception
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> Map<String, T> getBulk(Collection<String> keys, Transcoder<T> tc) throws EVCacheException;
/**
* Retrieve the value for the collection of keys, using the specified
* Transcoder for deserialization.
*
* @param keys
* The collection of keys for which we need the values. Ensure each key is properly encoded and does
* not contain whitespace or control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param tc
* the transcoder to use for deserialization
* @param timeToLive
* the new expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @return a map of the values (for each value that exists). If the value of
* the given key does not exist then null is returned. Only the keys
* whose value are not null and exist in the returned map are set to
* the new TTL as specified in timeToLive.
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues during deserialization or any IO
* Related issues
*/
<T> Map<String, T> getBulkAndTouch(Collection<String> keys, Transcoder<T> tc, int timeToLive)
throws EVCacheException;
/**
* Get the value for given key asynchronously and deserialize it with the
* default transcoder.
*
* @param key
* the key for which we need the value. Ensure the key is
* properly encoded and does not contain whitespace or control
* characters. The max length of the key (including prefix)
     *            is 250 characters.
* @return the Futures containing the Value or null.
* @throws EVCacheException
* in the circumstance where queue is too full to accept any
* more requests or issues during deserialization or timeout
* retrieving the value or any IO Related issues
*
     * @deprecated This is a sub-optimal operation that does not support Retries, Fast Failures, FIT, GC Detection, etc.
     *             It will be removed in a subsequent release.
*/
<T> Future<T> getAsynchronous(String key) throws EVCacheException;
/**
* Get the value for given key asynchronously and deserialize it with the
* given transcoder.
*
* @param key
* the key for which we need the value. Ensure the key is
* properly encoded and does not contain whitespace or control
* characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param tc
* the transcoder to use for deserialization
* @return the Futures containing the Value or null.
* @throws EVCacheException
* in the circumstance where queue is too full to accept any
* more requests or issues during deserialization or timeout
* retrieving the value or any IO Related issues
*
     * @deprecated This is a sub-optimal operation that does not support Retries, Fast Failures, FIT, GC Detection, etc.
     *             It will be removed in a subsequent release.
*/
<T> Future<T> getAsynchronous(String key, Transcoder<T> tc) throws EVCacheException;
/**
* Increment the given counter, returning the new value.
*
* @param key
* the key. Ensure the key is
* properly encoded and does not contain whitespace or control
* characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param by
* the amount to increment
* @param def
* the default value (if the counter does not exist)
* @param exp
* the expiration of this object
* @return the new value, or -1 if we were unable to increment or add
* @throws EVCacheException
* in the circumstance where timeout is exceeded or queue is
* full
*
*/
public long incr(String key, long by, long def, int exp) throws EVCacheException;
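    // Usage sketch (hypothetical counter key; creates the counter at 1 if absent,
    // otherwise increments it by 1, with a one-hour expiry):
    //   long views = evCache.incr("views_title_42", 1, 1, 3600);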
/**
* Decrement the given counter, returning the new value.
*
* @param key
* the key. Ensure the key is
* properly encoded and does not contain whitespace or control
* characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param by
* the amount to decrement
* @param def
* the default value (if the counter does not exist)
* @param exp
* the expiration of this object
* @return the new value, or -1 if we were unable to decrement or add
* @throws EVCacheException
* in the circumstance where timeout is exceeded or queue is
* full
*
*/
public long decr(String key, long by, long def, int exp) throws EVCacheException;
/**
* Append the given value to the existing value in EVCache. You cannot
* append if the key does not exist in EVCache. If the value has not changed
* then false will be returned.
*
* @param key
* the key under which this object should be appended. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @param value
* the value to be appended
* @param tc
* the transcoder the will be used for serialization
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
*
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the circumstance where queue is too full to accept any
* more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] append(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
* Append the given value to the existing value in EVCache. You cannot
* append if the key does not exist in EVCache. If the value has not changed
* or does not exist then false will be returned.
*
* @param key
* the key under which this object should be appended. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @param value
* the value to be appended
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
*
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the circumstance where queue is too full to accept any
* more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] append(String key, T value, int timeToLive) throws EVCacheException;
/**
     * @deprecated Please use {@link #add(String, Object, Transcoder, int, Policy)}
*
     * Add the given value to EVCache. You cannot add if the key already exists in EVCache.
*
* @param key
* the key which this object should be added to. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @param value
* the value to be added
* @param tc
* the transcoder the will be used for serialization
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
*
* @return boolean which indicates if the add was successful or not.
* The operation will fail with a false response if the data already exists in EVCache.
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> boolean add(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
     * Add the given value to EVCache. You cannot add if the key already exists in EVCache.
*
* @param key
* the key which this object should be added to. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @param value
* the value to be added
* @param tc
* the transcoder the will be used for serialization
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
* @param policy
     *            The Latch will be returned based on the Policy. The Latch can then be used to await until the countdown has reached 0 or the specified time has elapsed.
*
*
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation to ensure all adds are successful. If there is
     *         any partial success, the client will try to fix the data.
*
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> EVCacheLatch add(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException;
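    // Usage sketch (hypothetical key/value; passing a null Transcoder is assumed here
    // to select the default transcoder, and EVCacheLatch#await(long, TimeUnit) is
    // assumed per the latch contract above):
    //   EVCacheLatch latch = evCache.add("user_123", profileJson, null, 900, Policy.QUORUM);
    //   boolean quorumMet = latch.await(500, TimeUnit.MILLISECONDS);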
/**
* Touch the given key and reset its expiration time.
*
* @param key
* the key to touch. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param ttl
* the new expiration time in seconds
*
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] touch(String key, int ttl) throws EVCacheException;
/**
* Touch the given key and reset its expiration time.
*
* @param key
* the key to touch. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
* @param ttl
* the new expiration time in seconds
*
* @param policy
* The Latch will be returned based on the Policy. The Latch can
     *            then be used to await until the countdown has reached 0 or
* the specified time has elapsed.
*
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation based on the policy to ensure the required
     *         criteria are met. The Latch can also be queried to get details on
* status of the operations
*
* @throws EVCacheException
* in the rare circumstance where queue is too full to accept
* any more requests or any IO Related issues
*/
<T> EVCacheLatch touch(String key, int ttl, EVCacheLatch.Policy policy) throws EVCacheException;
/**
     * Append the given value to the existing value in EVCache. If the key does not exist, the key will be added.
*
*
* @param key
* the key under which this object should be appended or Added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @param value
* the value to be appended
* @param tc
* the transcoder the will be used for serialization
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
*
* @return Array of futures representing the processing of this operation
* across all the replicas
* @throws EVCacheException
* in the circumstance where queue is too full to accept any
* more requests or issues Serializing the value or any IO
* Related issues
*/
<T> Future<Boolean>[] appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive) throws EVCacheException;
/**
     * Append the given value to the existing value in EVCache. If the key does not exist, the key will be added.
*
*
* @param key
* the key under which this object should be appended or Added. Ensure the
* key is properly encoded and does not contain whitespace or
* control characters. The max length of the key (including prefix)
     *            is 250 characters.
     * @param value
* the value to be appended
* @param tc
* the transcoder the will be used for serialization
* @param timeToLive
* the expiration of this object i.e. less than 30 days in
* seconds or the exact expiry time as UNIX time
*
* @param policy
     *            The Latch will be returned based on the Policy. The Latch can then be used to await until the countdown has reached 0 or the specified time has elapsed.
*
     * @return EVCacheLatch which encompasses the Operation. You can block
     *         on the Operation based on the policy to ensure the required
     *         criteria are met. The Latch can also be queried to get details on
* status of the operations
*
* @throws EVCacheException
* in the circumstance where queue is too full to accept any
* more requests or issues Serializing the value or any IO
* Related issues
*/
<T> EVCacheLatch appendOrAdd(String key, T value, Transcoder<T> tc, int timeToLive, Policy policy) throws EVCacheException;
/**
     * The {@code appName} used by this {@code EVCache}.
     *
     * @return the name of the EVCache App cluster
*/
String getAppName();
/**
     * The {@code cachePrefix} used by this {@code EVCache}.
     *
     * @return the cache prefix, or <code>null</code> if none was set
*/
String getCachePrefix();
/**
* A Builder that builds an EVCache based on the specified App Name, cache
* Name, TTl and Transcoder.
*
* @author smadappa
*/
public class Builder {
private static final Logger logger = LoggerFactory.getLogger(EVCacheImpl.class);
private String _appName;
private String _cachePrefix = null;
private int _ttl = 900;
private Transcoder<?> _transcoder = null;
private boolean _serverGroupRetry = true;
private boolean _enableExceptionThrowing = false;
private List<Customizer> _customizers = new ArrayList<>();
@Inject
private EVCacheClientPoolManager _poolManager;
/**
* Customizers allow post-processing of the Builder. This affords a way for libraries to
* perform customization.
*/
@FunctionalInterface
public interface Customizer {
void customize(final String cacheName, final Builder builder);
}
public static class Factory {
public Builder createInstance(String appName) {
return Builder.forApp(appName);
}
}
public static Builder forApp(final String appName) {
return new Builder().setAppName(appName);
}
public Builder() {
}
public Builder withConfigurationProperties(
final EVCacheClientPoolConfigurationProperties configurationProperties) {
return this
.setCachePrefix(configurationProperties.getKeyPrefix())
.setDefaultTTL(configurationProperties.getTimeToLive())
.setRetry(configurationProperties.getRetryEnabled())
.setExceptionThrowing(configurationProperties.getExceptionThrowingEnabled());
}
/**
* The {@code appName} that will be used by this {@code EVCache}.
*
* @param The
* name of the EVCache App cluster.
* @return this {@code Builder} object
*/
public Builder setAppName(String appName) {
if (appName == null) throw new IllegalArgumentException("param appName cannot be null.");
this._appName = appName.toUpperCase(Locale.US);
if (!_appName.startsWith("EVCACHE")) logger.warn("Make sure the app you are connecting to is EVCache App");
return this;
}
/**
* Adds {@code cachePrefix} to the key. This ensures there are no cache
* collisions if the same EVCache app is used across multiple use cases.
         * If the cache is not shared, we recommend setting this to
* <code>null</code>. Default is <code>null</code>.
*
         * @param cachePrefix
         *            The cache prefix cannot contain the colon (':') character.
* @return this {@code Builder} object
*/
public Builder setCachePrefix(String cachePrefix) {
            if (cachePrefix != null && cachePrefix.indexOf(':') != -1) throw new IllegalArgumentException(
                    "param cachePrefix cannot contain ':' character.");
this._cachePrefix = cachePrefix;
return this;
}
/**
* @deprecated Please use {@link #setCachePrefix(String)}
* @see #setCachePrefix(String)
*
* Adds {@code cacheName} to the key. This ensures there are no
         *             cache collisions if the same EVCache app is used across
* multiple use cases.
*
* @param cacheName
* @return this {@code Builder} object
*/
public Builder setCacheName(String cacheName) {
return setCachePrefix(cacheName);
}
/**
* The default Time To Live (TTL) for items in {@link EVCache} in
* seconds. You can override the value by passing the desired TTL with
* {@link EVCache#set(String, Object, int)} operations.
*
         * @param ttl the default TTL in seconds. Default is 900 seconds.
* @return this {@code Builder} object
*/
public Builder setDefaultTTL(int ttl) {
if (ttl < 0) throw new IllegalArgumentException("Time to Live cannot be less than 0.");
this._ttl = ttl;
return this;
}
/**
* The default Time To Live (TTL) for items in {@link EVCache} in
* seconds. You can override the value by passing the desired TTL with
* {@link EVCache#set(String, Object, int)} operations.
*
         * @param ttl the default TTL in seconds. Default is 900 seconds.
* @return this {@code Builder} object
*/
public Builder setDefaultTTL(@Nullable final Duration ttl) {
if (ttl == null) {
return this;
}
return setDefaultTTL((int) ttl.getSeconds());
}
@VisibleForTesting
Transcoder<?> getTranscoder() {
return this._transcoder;
}
/**
* The default {@link Transcoder} to be used for serializing and
* de-serializing items in {@link EVCache}.
*
* @param transcoder
* @return this {@code Builder} object
*/
public <T> Builder setTranscoder(Transcoder<T> transcoder) {
this._transcoder = transcoder;
return this;
}
/**
* @deprecated Please use {@link #enableRetry()}
*
* Will enable retries across Zone (Server Group).
*
* @return this {@code Builder} object
*/
public <T> Builder enableZoneFallback() {
this._serverGroupRetry = true;
return this;
}
/**
* Will enable or disable retry across Server Group for cache misses and exceptions
* if there are multiple Server Groups for the given EVCache App and
* data is replicated across them. This ensures the Hit Rate continues
* to be unaffected whenever a server group loses instances.
*
* By Default retry is enabled.
*
* @param enableRetry whether retries are to be enabled
* @return this {@code Builder} object
*/
public Builder setRetry(boolean enableRetry) {
this._serverGroupRetry = enableRetry;
return this;
}
/**
* Will enable retry across Server Group for cache misses and exceptions
* if there are multiple Server Groups for the given EVCache App and
* data is replicated across them. This ensures the Hit Rate continues
* to be unaffected whenever a server group loses instances.
*
* By Default retry is enabled.
*
* @return this {@code Builder} object
*/
public <T> Builder enableRetry() {
this._serverGroupRetry = true;
return this;
}
/**
* Will disable retry across Server Groups. This means if the data is
* not found in one server group null is returned.
*
* @return this {@code Builder} object
*/
public <T> Builder disableRetry() {
this._serverGroupRetry = false;
return this;
}
/**
* @deprecated Please use {@link #disableRetry()}
*
* Will disable retry across Zone (Server Group).
*
* @return this {@code Builder} object
*/
public <T> Builder disableZoneFallback() {
this._serverGroupRetry = false;
return this;
}
/**
* By Default exceptions are not propagated and null values are
* returned. By enabling exception propagation we return the
* {@link EVCacheException} whenever the operations experience them.
*
* @param enableExceptionThrowing whether exception throwing is to be enabled
* @return this {@code Builder} object
*/
public Builder setExceptionThrowing(boolean enableExceptionThrowing) {
this._enableExceptionThrowing = enableExceptionThrowing;
return this;
}
/**
* By Default exceptions are not propagated and null values are
* returned. By enabling exception propagation we return the
* {@link EVCacheException} whenever the operations experience them.
*
* @return this {@code Builder} object
*/
public <T> Builder enableExceptionPropagation() {
this._enableExceptionThrowing = true;
return this;
}
/**
* Adds customizers to be applied by {@code customize}.
*
* @param customizers List of {@code Customizer}s
* @return this {@code Builder} object
*/
public Builder addCustomizers(@Nullable final List<Customizer> customizers) {
            if (customizers != null) this._customizers.addAll(customizers);
return this;
}
/**
         * Applies {@code Customizer}s added through {@code addCustomizers} to {@code this}.
*
* @return this {@code Builder} object
*/
public Builder customize() {
            _customizers.forEach(this::customizeWith);
return this;
}
/**
         * Customizes {@code this} with the {@code customizer}.
*
* @param customizer {@code Customizer} or {@code Consumer<String, Builder>} to be applied to {@code this}.
* @return this {@code Builder} object
*/
public Builder customizeWith(final Customizer customizer) {
customizer.customize(this._appName, this);
return this;
}
protected EVCache newImpl(String appName, String cachePrefix, int ttl, Transcoder<?> transcoder, boolean serverGroupRetry, boolean enableExceptionThrowing, EVCacheClientPoolManager poolManager) {
return new EVCacheImpl(appName, cachePrefix, ttl, transcoder, serverGroupRetry, enableExceptionThrowing, poolManager);
}
/**
* Returns a newly created {@code EVCache} based on the contents of the
* {@code Builder}.
*/
@SuppressWarnings("deprecation")
public EVCache build() {
if (_poolManager == null) {
_poolManager = EVCacheClientPoolManager.getInstance();
if (logger.isDebugEnabled()) logger.debug("_poolManager - " + _poolManager + " through getInstance");
}
if (_appName == null) {
throw new IllegalArgumentException("param appName cannot be null.");
}
if(_cachePrefix != null) {
for(int i = 0; i < _cachePrefix.length(); i++) {
if(Character.isWhitespace(_cachePrefix.charAt(i))){
throw new IllegalArgumentException("Cache Prefix ``" + _cachePrefix + "`` contains invalid character at position " + i );
}
}
}
customize();
return newImpl(_appName, _cachePrefix, _ttl, _transcoder, _serverGroupRetry, _enableExceptionThrowing, _poolManager);
}
}
}
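// A minimal construction sketch (hypothetical app name "EVCACHE_DEMO"; assumes an
// initialized EVCacheClientPoolManager is available to the Builder, and uses the
// set(String, Object, int) operation referenced by the Javadoc above):
//
//   EVCache cache = EVCache.Builder.forApp("EVCACHE_DEMO")
//       .setCachePrefix("demo")
//       .setDefaultTTL(900)
//       .enableRetry()
//       .build();
//   cache.set("greeting", "hello", 300);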
| 764 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/EVCacheReadQueueException.java
|
package com.netflix.evcache;
public class EVCacheReadQueueException extends EVCacheException {
private static final long serialVersionUID = -7660503904923117538L;
public EVCacheReadQueueException(String message) {
super(message);
}
public EVCacheReadQueueException(String message, Throwable cause) {
super(message, cause);
}
}
| 765 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/dto/EVCacheResponseStatus.java
|
package com.netflix.evcache.dto;
public class EVCacheResponseStatus {
private String status;
public EVCacheResponseStatus(String status) {
this.status = status;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
}
| 766 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/dto/KeyMapDto.java
|
package com.netflix.evcache.dto;
import com.netflix.evcache.EVCacheKey;
import java.util.Map;
public class KeyMapDto {
Map<String, EVCacheKey> keyMap;
boolean isKeyHashed;
public KeyMapDto(Map<String, EVCacheKey> keyMap, boolean isKeyHashed) {
this.keyMap = keyMap;
this.isKeyHashed = isKeyHashed;
}
public Map<String, EVCacheKey> getKeyMap() {
return keyMap;
}
public boolean isKeyHashed() {
return isKeyHashed;
}
}
| 767 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheItem.java
|
package com.netflix.evcache.operation;
public class EVCacheItem<T> {
private final EVCacheItemMetaData item;
private T data = null;
private int flag = 0;
public EVCacheItem() {
item = new EVCacheItemMetaData();
}
public EVCacheItemMetaData getItemMetaData() {
return item;
}
public T getData() {
return data;
}
public void setData(T data) {
this.data = data;
}
public int getFlag() {
return flag;
}
public void setFlag(int flag) {
this.flag = flag;
}
@Override
public String toString() {
return "EVCacheItem [item=" + item + ", data=" + data + ", flag=" + flag + "]";
}
}
| 768 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheOperationFuture.java
|
package com.netflix.evcache.operation;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;
import net.spy.memcached.ops.OperationState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCacheGetOperationListener;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.sun.management.GcInfo;
import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.ops.Operation;
import rx.Scheduler;
import rx.Single;
import rx.functions.Action0;
/**
* Managed future for operations.
*
* <p>
 * From an OperationFuture, application code can determine the status of a
* given Operation in an asynchronous manner.
*
* <p>
 * If, for example, we needed to update the keys "user:<userid>:name" and
 * "user:<userid>:friendlist", and later in the method verify that the changes
 * occurred as expected, we can fire multiple IO operations simultaneously with
 * this concept.
*
* @param <T>
* Type of object returned from this future.
*/
@SuppressWarnings("restriction")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings("EXS_EXCEPTION_SOFTENING_HAS_CHECKED")
public class EVCacheOperationFuture<T> extends OperationFuture<T> {
private static final Logger log = LoggerFactory.getLogger(EVCacheOperationFuture.class);
private static final class LazySharedExecutor {
private static final ScheduledThreadPoolExecutor executor =
new ScheduledThreadPoolExecutor(
1,
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("evcache-timeout-%s")
.setUncaughtExceptionHandler(
(t, e) ->
log.error(
"{} timeout operation failed with exception: {}", t.getName(), e))
.build());
static {
// We don't need to keep around all those cancellation tasks taking up memory once the
// initial caller completes.
executor.setRemoveOnCancelPolicy(true);
}
}
private final CountDownLatch latch;
private final AtomicReference<T> objRef;
private Operation op;
private final String key;
private final long start;
private final EVCacheClient client;
public EVCacheOperationFuture(String k, CountDownLatch l, AtomicReference<T> oref, long opTimeout, ExecutorService service, EVCacheClient client) {
super(k, l, oref, opTimeout, service);
this.latch = l;
this.objRef = oref;
this.key = k;
this.client = client;
this.start = System.currentTimeMillis();
}
public Operation getOperation() {
return this.op;
}
public void setOperation(Operation to) {
this.op = to;
super.setOperation(to);
}
public String getApp() {
return client.getAppName();
}
public String getKey() {
return key;
}
public String getZone() {
return client.getZone();
}
public ServerGroup getServerGroup() {
return client.getServerGroup();
}
public EVCacheClient getEVCacheClient() {
return client;
}
public EVCacheOperationFuture<T> addListener(EVCacheGetOperationListener<T> listener) {
super.addToListeners(listener);
return this;
}
public EVCacheOperationFuture<T> removeListener(EVCacheGetOperationListener<T> listener) {
super.removeFromListeners(listener);
return this;
}
/**
* Get the results of the given operation.
*
* As with the Future interface, this call will block until the results of
     * the future operation have been received.
*
* Note: If we detect there was GC pause and our operation was caught in
* between we wait again to see if we will be successful. This is effective
* as the timeout we specify is very low.
*
* @param duration
* amount of time to wait
* @param units
* unit of time to wait
     * @param throwException
     *            whether an exception should be thrown (or null returned) on a
     *            failed operation
     * @param hasZF
     *            whether zone fallback is available
* @return the operation results of this OperationFuture
* @throws InterruptedException
* @throws TimeoutException
* @throws ExecutionException
*/
public T get(long duration, TimeUnit units, boolean throwException, boolean hasZF) throws InterruptedException, TimeoutException, ExecutionException {
boolean status = latch.await(duration, units);
if (!status) {
status = handleGCPauseForGet(duration, units, throwException, hasZF);
}
if (status) MemcachedConnection.opSucceeded(op);// continuous timeout counter will be reset
return objRef.get();
}
private boolean handleGCPauseForGet(long duration, TimeUnit units, boolean throwException, boolean hasZF) throws InterruptedException, ExecutionException {
boolean status;
boolean gcPause = false;
final RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
final long vmStartTime = runtimeBean.getStartTime();
final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
for (GarbageCollectorMXBean gcMXBean : gcMXBeans) {
if (gcMXBean instanceof com.sun.management.GarbageCollectorMXBean) {
final GcInfo lastGcInfo = ((com.sun.management.GarbageCollectorMXBean) gcMXBean).getLastGcInfo();
// If no GCs, there was no pause due to GC.
if (lastGcInfo == null) {
continue;
}
final long gcStartTime = lastGcInfo.getStartTime() + vmStartTime;
if (gcStartTime > start) {
gcPause = true;
final long gcDuration = lastGcInfo.getDuration();
final long pauseDuration = System.currentTimeMillis() - gcStartTime;
if (log.isDebugEnabled()) {
log.debug("Event Start Time = " + start + "; Last GC Start Time = " + gcStartTime + "; " + (gcStartTime - start) + " msec ago.\n"
+ "\nTotal pause duration due for this event = " + pauseDuration + " msec.\nTotal GC duration = " + gcDuration + " msec.");
}
break;
}
}
}
if (!gcPause && log.isDebugEnabled()) {
log.debug("Total pause duration due to NON-GC event = " + (System.currentTimeMillis() - start) + " msec.");
}
// redo the same op once more since there was a chance of gc pause
status = latch.await(duration, units);
if (log.isDebugEnabled()) log.debug("re-await status : " + status);
String statusString = EVCacheMetricsFactory.SUCCESS;
final long pauseDuration = System.currentTimeMillis() - start;
if (op != null && !status) {
// whenever timeout occurs, continuous timeout counter will increase by 1.
MemcachedConnection.opTimedOut(op);
op.timeOut();
ExecutionException t = null;
if(throwException && !hasZF) {
if (op.isTimedOut()) { t = new ExecutionException(new CheckedOperationTimeoutException("Checked Operation timed out.", op)); statusString = EVCacheMetricsFactory.CHECKED_OP_TIMEOUT; }
else if (op.isCancelled() && throwException) { t = new ExecutionException(new CancellationException("Cancelled"));statusString = EVCacheMetricsFactory.CANCELLED; }
else if (op.hasErrored() ) { t = new ExecutionException(op.getException());statusString = EVCacheMetricsFactory.ERROR; }
}
if(t != null) throw t; //finally throw the exception if needed
}
final List<Tag> tagList = new ArrayList<Tag>(client.getTagList().size() + 4);
tagList.addAll(client.getTagList());
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.GET_OPERATION));
tagList.add(new BasicTag(EVCacheMetricsFactory.PAUSE_REASON, gcPause ? EVCacheMetricsFactory.GC:EVCacheMetricsFactory.SCHEDULE));
tagList.add(new BasicTag(EVCacheMetricsFactory.FETCH_AFTER_PAUSE, status ? EVCacheMetricsFactory.YES:EVCacheMetricsFactory.NO));
tagList.add(new BasicTag(EVCacheMetricsFactory.OPERATION_STATUS, statusString));
EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_PAUSE, tagList, Duration.ofMillis(EVCacheConfig.getInstance().getPropertyRepository().get(getApp() + ".max.write.duration.metric", Integer.class).orElseGet("evcache.max.write.duration.metric").orElse(50).get().intValue())).record(pauseDuration, TimeUnit.MILLISECONDS);
return status;
}
public Single<T> observe() {
return Single.create(subscriber ->
addListener((EVCacheGetOperationListener<T>) future -> {
try {
subscriber.onSuccess(get());
} catch (Throwable e) {
subscriber.onError(e);
}
})
);
}
static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future,
long timeout,
TimeUnit unit) {
int timeoutSlots = getTimeoutSlots((int) timeout);
        // [DABP-2005] split the timeout into timeoutSlots slots so we do not time out during a GC pause.
long splitTimeout = Math.max(1, timeout / timeoutSlots);
CompletableFuture<Void> chain = CompletableFuture.completedFuture(null);
for (int i = 0; i < timeoutSlots; i++) {
final int j = i;
chain = chain.thenCompose(unused -> getNext(future, j, timeout, splitTimeout, unit, timeoutSlots));
}
return future;
}
private static int getTimeoutSlots(int timeout) {
if(log.isDebugEnabled()) log.debug("Timeout is {}", timeout);
int timeoutSlots;
        int val = timeout / 10;
if (val == 0 ) {
timeoutSlots = 1;
} else if (val >= 1 && val < 5) {
timeoutSlots = val;
} else {
timeoutSlots = 5;
}
if(log.isDebugEnabled()) log.debug("timeoutSlots is {}", timeoutSlots);
return timeoutSlots;
}
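    // Worked example of the slot computation above (illustration only):
    //   timeout =   8 ms -> val = 0  -> 1 slot (a single full-length await)
    //   timeout =  30 ms -> val = 3  -> 3 slots of ~10 ms each
    //   timeout = 120 ms -> val = 12 -> capped at 5 slots of 24 ms each
    // where each slot waits splitTimeout = max(1, timeout / timeoutSlots) ms.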
private static<T> CompletableFuture<Void> getNext(CompletableFuture<T> future,
final int j,
long timeout,
long splitTimeout,
TimeUnit unit,
int timeoutSlots) {
CompletableFuture<Void> next = new CompletableFuture<>();
if (future.isDone()) {
next.complete(null);
} else {
ScheduledFuture<?> scheduledTimeout;
if (j < timeoutSlots - 1) {
scheduledTimeout =
LazySharedExecutor.executor.schedule(
() -> {
if(log.isDebugEnabled()) log.debug("Completing now for loop {} and timeout slot {}", j, timeoutSlots);
next.complete(null);
},
splitTimeout,
TimeUnit.MILLISECONDS);
} else {
scheduledTimeout =
LazySharedExecutor.executor.schedule(
() -> {
next.complete(null);
if (future.isDone()) {
return;
}
                            if(log.isWarnEnabled()) log.warn("Throwing timeout exception after {} {} with timeout slot {}",
timeout,
unit,
timeoutSlots);
future.completeExceptionally(new TimeoutException("Timeout after " + timeout));
},
splitTimeout,
unit);
}
// If the completable future completes normally, don't bother timing it out.
// Also cleans the ref for GC.
future.whenComplete(
(r, exp) -> {
if (exp == null) {
scheduledTimeout.cancel(false);
if(log.isDebugEnabled()) log.debug("completing the future");
next.complete(null);
}
});
}
return next;
}
public <U> CompletableFuture<U> makeFutureWithTimeout(long timeout, TimeUnit units) {
final CompletableFuture<U> future = new CompletableFuture<>();
return withTimeout(future, timeout, units);
}
private void handleException() {
if (log.isDebugEnabled()) log.debug("handling the timeout in handleTimeoutException");
if (op != null) {
MemcachedConnection.opTimedOut(op);
op.timeOut();
ExecutionException t = null;
if (op.isTimedOut()) {
if (log.isDebugEnabled()) log.debug("Checked Operation timed out with operation {}.", op);
t = new ExecutionException(new CheckedOperationTimeoutException("Checked Operation timed out.", op));
} else if (op.isCancelled()) {
if (log.isDebugEnabled()) log.debug("Cancelled with operation {}.", op);
t = new ExecutionException(new CancellationException("Cancelled"));
} else if (op.hasErrored()) {
if (log.isDebugEnabled()) log.debug("Other exception with operation {}.", op);
t = new ExecutionException(op.getException());
}
throw new RuntimeException(t);
}
}
public CompletableFuture<T> getAsync(long timeout, TimeUnit units) {
CompletableFuture<T> future = makeFutureWithTimeout(timeout, units);
doAsyncGet(future);
return future.handle((data, ex) -> {
if (ex != null) {
handleException();
}
return data;
});
}
private void doAsyncGet(CompletableFuture<T> cf) {
EVCacheGetOperationListener<T> listener = future -> {
try {
T result = future.get();
cf.complete(result);
} catch (Exception t) {
cf.completeExceptionally(t);
}
};
this.addListener(listener);
}
public Single<T> get(long duration, TimeUnit units, boolean throwException, boolean hasZF, Scheduler scheduler) {
return observe().timeout(duration, units, Single.create(subscriber -> {
// whenever timeout occurs, continuous timeout counter will increase by 1.
MemcachedConnection.opTimedOut(op);
if (op != null) op.timeOut();
//if (!hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-get-CheckedOperationTimeout", DataSourceType.COUNTER).increment();
if (throwException) {
subscriber.onError(new CheckedOperationTimeoutException("Timed out waiting for operation", op));
} else {
if (isCancelled()) {
//if (hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-get-Cancelled", DataSourceType.COUNTER).increment();
}
subscriber.onSuccess(objRef.get());
}
}), scheduler).doAfterTerminate(new Action0() {
@Override
public void call() {
}
}
);
}
public void signalComplete() {
super.signalComplete();
}
/**
* Cancel this operation, if possible.
*
* @param ign not used
* @deprecated
* @return true if the operation has not yet been written to the network
*/
public boolean cancel(boolean ign) {
if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
return super.cancel(ign);
}
/**
* Cancel this operation, if possible.
*
* @return true if the operation has not yet been written to the network
*/
public boolean cancel() {
if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
return super.cancel();
}
public long getStartTime() {
return start;
}
}
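// Usage sketch (hypothetical; withTimeout is the package-private helper defined above):
//
//   CompletableFuture<String> f = new CompletableFuture<>();
//   EVCacheOperationFuture.withTimeout(f, 100, TimeUnit.MILLISECONDS);
//   // If f is still incomplete after ~100 ms -- awaited across up to 5 shorter
//   // slots so a single GC pause does not trigger a spurious timeout -- it is
//   // completed exceptionally with a TimeoutException.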
| 769 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheAsciiOperationFactory.java
|
package com.netflix.evcache.operation;
import net.spy.memcached.protocol.ascii.AsciiOperationFactory;
import net.spy.memcached.protocol.ascii.ExecCmdOperation;
import net.spy.memcached.protocol.ascii.ExecCmdOperationImpl;
import net.spy.memcached.protocol.ascii.MetaDebugOperation;
import net.spy.memcached.protocol.ascii.MetaDebugOperationImpl;
import net.spy.memcached.protocol.ascii.MetaGetOperation;
import net.spy.memcached.protocol.ascii.MetaGetOperationImpl;
import net.spy.memcached.protocol.ascii.MetaArithmeticOperationImpl;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.MutatorOperation;
import net.spy.memcached.ops.OperationCallback;
public class EVCacheAsciiOperationFactory extends AsciiOperationFactory {
public MetaDebugOperation metaDebug(String key, MetaDebugOperation.Callback cb) {
return new MetaDebugOperationImpl(key, cb);
}
public MetaGetOperation metaGet(String key, MetaGetOperation.Callback cb) {
return new MetaGetOperationImpl(key, cb);
}
public ExecCmdOperation execCmd(String cmd, ExecCmdOperation.Callback cb) {
return new ExecCmdOperationImpl(cmd, cb);
}
public MutatorOperation mutate(Mutator m, String key, long by, long def,
int exp, OperationCallback cb) {
return new MetaArithmeticOperationImpl(m, key, by, def, exp, cb);
}
}
| 770 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheFuture.java
|
package com.netflix.evcache.operation;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
public class EVCacheFuture implements Future<Boolean> {
private static final Logger log = LoggerFactory.getLogger(EVCacheFuture.class);
private final Future<Boolean> future;
private final String app;
private final ServerGroup serverGroup;
private final String key;
private final EVCacheClient client;
public EVCacheFuture(Future<Boolean> future, String key, String app, ServerGroup serverGroup) {
this(future, key, app, serverGroup, null);
}
public EVCacheFuture(Future<Boolean> future, String key, String app, ServerGroup serverGroup, EVCacheClient client) {
this.future = future;
this.app = app;
this.serverGroup = serverGroup;
this.key = key;
this.client = client;
}
public Future<Boolean> getFuture() {
return future;
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
return future.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return future.isCancelled();
}
@Override
public boolean isDone() {
return future.isDone();
}
@Override
public Boolean get() throws InterruptedException, ExecutionException {
return future.get();
}
@Override
public Boolean get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return future.get(timeout, unit);
}
public String getKey() {
return key;
}
public String getApp() {
return app;
}
public String getZone() {
return serverGroup.getZone();
}
public String getServerGroupName() {
return serverGroup.getName();
}
public EVCacheClient getEVCacheClient() {
return client;
}
@Override
public String toString() {
return "EVCacheFuture [future=" + future + ", app=" + app + ", ServerGroup="
+ serverGroup + ", EVCacheClient=" + client + "]";
}
}
| 771 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheBulkGetFuture.java
|
package com.netflix.evcache.operation;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.*;
import com.netflix.evcache.EVCacheGetOperationListener;
import net.spy.memcached.internal.BulkGetCompletionListener;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.sun.management.GcInfo;
import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.internal.BulkGetFuture;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationState;
import rx.Scheduler;
import rx.Single;
/**
* Future for handling results from bulk gets.
*
* Not intended for general use.
*
 * @param <T> the type of objects returned from the bulk get
*/
@SuppressWarnings("restriction")
public class EVCacheBulkGetFuture<T> extends BulkGetFuture<T> {
private static final Logger log = LoggerFactory.getLogger(EVCacheBulkGetFuture.class);
private final Map<String, Future<T>> rvMap;
private final Collection<Operation> ops;
private final CountDownLatch latch;
private final long start;
private final EVCacheClient client;
public EVCacheBulkGetFuture(Map<String, Future<T>> m, Collection<Operation> getOps, CountDownLatch l, ExecutorService service, EVCacheClient client) {
super(m, getOps, l, service);
rvMap = m;
ops = getOps;
latch = l;
this.start = System.currentTimeMillis();
this.client = client;
}
public Map<String, T> getSome(long to, TimeUnit unit, boolean throwException, boolean hasZF)
throws InterruptedException, ExecutionException {
boolean status = latch.await(to, unit);
if(log.isDebugEnabled()) log.debug("Took " + (System.currentTimeMillis() - start)+ " to fetch " + rvMap.size() + " keys from " + client);
long pauseDuration = -1;
List<Tag> tagList = null;
Collection<Operation> timedoutOps = null;
String statusString = EVCacheMetricsFactory.SUCCESS;
try {
if (!status) {
boolean gcPause = false;
tagList = new ArrayList<Tag>(7);
tagList.addAll(client.getTagList());
tagList.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.BULK_OPERATION));
final RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
final long vmStartTime = runtimeBean.getStartTime();
final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
for (GarbageCollectorMXBean gcMXBean : gcMXBeans) {
if (gcMXBean instanceof com.sun.management.GarbageCollectorMXBean) {
final GcInfo lastGcInfo = ((com.sun.management.GarbageCollectorMXBean) gcMXBean).getLastGcInfo();
// If no GCs, there was no pause.
if (lastGcInfo == null) {
continue;
}
final long gcStartTime = lastGcInfo.getStartTime() + vmStartTime;
if (gcStartTime > start) {
gcPause = true;
if (log.isDebugEnabled()) log.debug("Total duration due to gc event = " + lastGcInfo.getDuration() + " msec.");
break;
}
}
}
// redo the same op once more since there was a chance of gc pause
if (gcPause) {
status = latch.await(to, unit);
tagList.add(new BasicTag(EVCacheMetricsFactory.PAUSE_REASON, EVCacheMetricsFactory.GC));
if (log.isDebugEnabled()) log.debug("Retry status : " + status);
if (status) {
tagList.add(new BasicTag(EVCacheMetricsFactory.FETCH_AFTER_PAUSE, EVCacheMetricsFactory.YES));
} else {
tagList.add(new BasicTag(EVCacheMetricsFactory.FETCH_AFTER_PAUSE, EVCacheMetricsFactory.NO));
}
} else {
tagList.add(new BasicTag(EVCacheMetricsFactory.PAUSE_REASON, EVCacheMetricsFactory.SCHEDULE));
}
pauseDuration = System.currentTimeMillis() - start;
if (log.isDebugEnabled()) log.debug("Total duration due to gc event = " + (System.currentTimeMillis() - start) + " msec.");
}
for (Operation op : ops) {
if (op.getState() != OperationState.COMPLETE) {
if (!status) {
MemcachedConnection.opTimedOut(op);
if(timedoutOps == null) timedoutOps = new HashSet<Operation>();
timedoutOps.add(op);
} else {
MemcachedConnection.opSucceeded(op);
}
} else {
MemcachedConnection.opSucceeded(op);
}
}
if (!status && !hasZF && (timedoutOps != null && timedoutOps.size() > 0)) statusString = EVCacheMetricsFactory.TIMEOUT;
for (Operation op : ops) {
if(op.isCancelled()) {
if (hasZF) statusString = EVCacheMetricsFactory.CANCELLED;
if (throwException) throw new ExecutionException(new CancellationException("Cancelled"));
}
if (op.hasErrored() && throwException) throw new ExecutionException(op.getException());
}
Map<String, T> m = new HashMap<String, T>();
for (Map.Entry<String, Future<T>> me : rvMap.entrySet()) {
m.put(me.getKey(), me.getValue().get());
}
return m;
} finally {
if(pauseDuration > 0) {
tagList.add(new BasicTag(EVCacheMetricsFactory.OPERATION_STATUS, statusString));
EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_PAUSE, tagList, Duration.ofMillis(EVCacheConfig.getInstance().getPropertyRepository().get(getApp() + ".max.read.duration.metric", Integer.class)
.orElseGet("evcache.max.read.duration.metric").orElse(20).get().intValue())).record(pauseDuration, TimeUnit.MILLISECONDS);
}
}
}
public CompletableFuture<Map<String, T>> getSomeCompletableFuture(long to, TimeUnit unit, boolean throwException, boolean hasZF) {
CompletableFuture<Map<String, T>> completableFuture = new CompletableFuture<>();
try {
Map<String, T> value = getSome(to, unit, throwException, hasZF);
completableFuture.complete(value);
} catch (Exception e) {
completableFuture.completeExceptionally(e);
}
return completableFuture;
}
public Single<Map<String, T>> observe() {
return Single.create(subscriber ->
addListener(future -> {
try {
subscriber.onSuccess(get());
} catch (Throwable e) {
subscriber.onError(e);
}
})
);
}
public <U> CompletableFuture<U> makeFutureWithTimeout(long timeout, TimeUnit units) {
final CompletableFuture<U> future = new CompletableFuture<>();
return EVCacheOperationFuture.withTimeout(future, timeout, units);
}
public CompletableFuture<Map<String, T>> getAsyncSome(long timeout, TimeUnit units) {
CompletableFuture<Map<String, T>> future = makeFutureWithTimeout(timeout, units);
doAsyncGetSome(future);
return future.handle((data, ex) -> {
if (ex != null) {
handleBulkException();
}
return data;
});
}
public void handleBulkException() {
ExecutionException t = null;
for (Operation op : ops) {
if (op.getState() != OperationState.COMPLETE) {
if (op.isCancelled()) {
throw new RuntimeException(new ExecutionException(new CancellationException("Cancelled")));
}
else if (op.hasErrored()) {
throw new RuntimeException(new ExecutionException(op.getException()));
}
else {
op.timeOut();
MemcachedConnection.opTimedOut(op);
t = new ExecutionException(new CheckedOperationTimeoutException("Checked Operation timed out.", op));
}
} else {
MemcachedConnection.opSucceeded(op);
}
}
throw new RuntimeException(t);
}
public void doAsyncGetSome(CompletableFuture<Map<String, T>> promise) {
this.addListener(future -> {
try {
Map<String, T> m = new HashMap<>();
Map<String, ?> result = future.get();
for (Map.Entry<String, ?> me : result.entrySet()) {
m.put(me.getKey(), (T)me.getValue());
}
promise.complete(m);
} catch (Exception t) {
promise.completeExceptionally(t);
}
});
}
public Single<Map<String, T>> getSome(long to, TimeUnit units, boolean throwException, boolean hasZF, Scheduler scheduler) {
return observe().timeout(to, units, Single.create(subscriber -> {
try {
final Collection<Operation> timedoutOps = new HashSet<Operation>();
for (Operation op : ops) {
if (op.getState() != OperationState.COMPLETE) {
MemcachedConnection.opTimedOut(op);
timedoutOps.add(op);
} else {
MemcachedConnection.opSucceeded(op);
}
}
//if (!hasZF && timedoutOps.size() > 0) EVCacheMetricsFactory.getInstance().increment(client.getAppName() + "-getSome-CheckedOperationTimeout", client.getTagList());
for (Operation op : ops) {
if (op.isCancelled() && throwException) throw new ExecutionException(new CancellationException("Cancelled"));
if (op.hasErrored() && throwException) throw new ExecutionException(op.getException());
}
Map<String, T> m = new HashMap<String, T>();
for (Map.Entry<String, Future<T>> me : rvMap.entrySet()) {
m.put(me.getKey(), me.getValue().get());
}
subscriber.onSuccess(m);
} catch (Throwable e) {
subscriber.onError(e);
}
}), scheduler);
}
public String getZone() {
return client.getServerGroupName();
}
public ServerGroup getServerGroup() {
return client.getServerGroup();
}
public String getApp() {
return client.getAppName();
}
public Set<String> getKeys() {
return Collections.unmodifiableSet(rvMap.keySet());
}
public void signalComplete() {
super.signalComplete();
}
public boolean cancel(boolean ign) {
if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
return super.cancel(ign);
}
public long getStartTime() {
return start;
}
}
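// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: one hedged way a
// caller might consume the async bulk API above. getAsyncSome(...) completes
// with whatever keys arrived before the timeout, so a partial map is a normal,
// non-exceptional outcome. The future is assumed to come from a bulk read on
// an EVCacheClient.
// ---------------------------------------------------------------------------
class EVCacheBulkGetUsageSketch {
    static <T> CompletableFuture<Integer> countRetrievedKeys(EVCacheBulkGetFuture<T> bulkFuture) {
        return bulkFuture.getAsyncSome(100, TimeUnit.MILLISECONDS)
                .thenApply(results -> results == null ? 0 : results.size());
    }
}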
| 772 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheLatchImpl.java
|
package com.netflix.evcache.operation;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.event.EVCacheEvent;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.ServerGroup;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.ipc.IpcStatus;
import net.spy.memcached.internal.ListenableFuture;
import net.spy.memcached.internal.OperationCompletionListener;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.ops.StatusCode;
public class EVCacheLatchImpl implements EVCacheLatch, Runnable {
private static final Logger log = LoggerFactory.getLogger(EVCacheLatchImpl.class);
private final int expectedCompleteCount;
private final CountDownLatch latch;
private final List<Future<Boolean>> futures;
private final Policy policy;
private final int totalFutureCount;
private final long start;
private final String appName;
private EVCacheEvent evcacheEvent = null;
private boolean onCompleteDone = false;
private int completeCount = 0;
private int failureCount = 0;
private String failReason = null;
private ScheduledFuture<?> scheduledFuture;
public EVCacheLatchImpl(Policy policy, int _count, String appName) {
this.start = System.currentTimeMillis();
this.policy = policy;
this.futures = new ArrayList<Future<Boolean>>(_count);
this.appName = appName;
this.totalFutureCount = _count;
this.expectedCompleteCount = policyToCount(policy, _count);
this.latch = new CountDownLatch(expectedCompleteCount);
if (log.isDebugEnabled()) log.debug("Number of Futures = " + _count + "; Number of Futures that need to completed for Latch to be released = " + this.expectedCompleteCount);
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#await(long,java.util.concurrent.TimeUnit)
*/
@Override
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
if (log.isDebugEnabled()) log.debug("Current Latch Count = " + latch.getCount() + "; await for "+ timeout + " " + unit.name() + " appName : " + appName);
final long start = log.isDebugEnabled() ? System.currentTimeMillis() : 0;
final boolean awaitSuccess = latch.await(timeout, unit);
if (log.isDebugEnabled()) log.debug("await success = " + awaitSuccess + " after " + (System.currentTimeMillis() - start) + " msec." + " appName : " + appName + ((evcacheEvent != null) ? " keys : " + evcacheEvent.getEVCacheKeys() : ""));
return awaitSuccess;
}
/*
* (non-Javadoc)
*
* @see
* com.netflix.evcache.operation.EVCacheLatchI#addFuture(net.spy.memcached.internal.ListenableFuture)
*/
public void addFuture(ListenableFuture<Boolean, OperationCompletionListener> future) {
future.addListener(this);
if (future.isDone()) countDown();
this.futures.add(future);
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#isDone()
*/
@Override
public boolean isDone() {
if (latch.getCount() == 0) return true;
return false;
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#countDown()
*/
public void countDown() {
if (log.isDebugEnabled()) log.debug("Current Latch Count = " + latch.getCount() + "; Count Down.");
latch.countDown();
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getPendingCount()
*/
@Override
public int getPendingCount() {
if (log.isDebugEnabled()) log.debug("Pending Count = " + latch.getCount());
return (int) latch.getCount();
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getCompletedCount()
*/
@Override
public int getCompletedCount() {
if (log.isDebugEnabled()) log.debug("Completed Count = " + completeCount);
return completeCount;
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getPendingFutures()
*/
@Override
public List<Future<Boolean>> getPendingFutures() {
final List<Future<Boolean>> returnFutures = new ArrayList<Future<Boolean>>(expectedCompleteCount);
for (Future<Boolean> future : futures) {
if (!future.isDone()) {
returnFutures.add(future);
}
}
return returnFutures;
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getAllFutures()
*/
@Override
public List<Future<Boolean>> getAllFutures() {
return this.futures;
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getCompletedFutures()
*/
@Override
public List<Future<Boolean>> getCompletedFutures() {
final List<Future<Boolean>> returnFutures = new ArrayList<Future<Boolean>>(expectedCompleteCount);
for (Future<Boolean> future : futures) {
if (future.isDone()) {
returnFutures.add(future);
}
}
return returnFutures;
}
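    /*
     * Maps a latch Policy to the number of futures that must complete before
     * await() unblocks. For example, with writes to 3 copies: NONE -> 0,
     * ONE -> 1, QUORUM -> 2, ALL_MINUS_1 -> 2, and ALL (the default) -> 3.
     */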
private int policyToCount(Policy policy, int count) {
if (policy == null || count == 0) return 0;
switch (policy) {
case NONE:
return 0;
case ONE:
return 1;
case QUORUM:
if (count <= 2) return 1;
            else return (count / 2) + 1; // majority of the expected futures; the futures list is still empty when the constructor invokes this
case ALL_MINUS_1:
if (count <= 2) return 1;
else return count - 1;
default:
return count;
}
}
public void setEVCacheEvent(EVCacheEvent e) {
this.evcacheEvent = e;
}
/*
* (non-Javadoc)
*
* @see
* com.netflix.evcache.operation.EVCacheLatchI#onComplete(net.spy.memcached.internal.OperationFuture)
*/
@Override
public void onComplete(OperationFuture<?> future) throws Exception {
if (log.isDebugEnabled()) log.debug("BEGIN : onComplete - Calling Countdown. Completed Future = " + future + "; App : " + appName);
countDown();
completeCount++;
if(evcacheEvent != null) {
if (log.isDebugEnabled()) log.debug(";App : " + evcacheEvent.getAppName() + "; Call : " + evcacheEvent.getCall() + "; Keys : " + evcacheEvent.getEVCacheKeys() + "; completeCount : " + completeCount + "; totalFutureCount : " + totalFutureCount +"; failureCount : " + failureCount);
try {
if(future.isDone() && future.get().equals(Boolean.FALSE)) {
failureCount++;
if(failReason == null) failReason = EVCacheMetricsFactory.getInstance().getStatusCode(future.getStatus().getStatusCode());
}
} catch (Exception e) {
failureCount++;
if(failReason == null) failReason = IpcStatus.unexpected_error.name();
if(log.isDebugEnabled()) log.debug(e.getMessage(), e);
}
if(!onCompleteDone && getCompletedCount() >= getExpectedSuccessCount()) {
if(evcacheEvent.getClients().size() > 0) {
for(EVCacheClient client : evcacheEvent.getClients()) {
final List<EVCacheEventListener> evcacheEventListenerList = client.getPool().getEVCacheClientPoolManager().getEVCacheEventListeners();
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
evcacheEventListener.onComplete(evcacheEvent);
}
onCompleteDone = true;//This ensures we fire onComplete only once
break;
}
}
}
if(scheduledFuture != null) {
final boolean futureCancelled = scheduledFuture.isCancelled();
if (log.isDebugEnabled()) log.debug("App : " + evcacheEvent.getAppName() + "; Call : " + evcacheEvent.getCall() + "; Keys : " + evcacheEvent.getEVCacheKeys() + "; completeCount : " + completeCount + "; totalFutureCount : " + totalFutureCount +"; failureCount : " + failureCount + "; futureCancelled : " + futureCancelled);
if(onCompleteDone && !futureCancelled) {
if(completeCount == totalFutureCount && failureCount == 0) { // all futures are completed
final boolean status = scheduledFuture.cancel(true);
run();//TODO: should we reschedule this method to run as part of EVCacheScheduledExecutor instead of running on the callback thread
if (log.isDebugEnabled()) log.debug("Cancelled the scheduled task : " + status);
}
}
}
if (log.isDebugEnabled()) log.debug("App : " + evcacheEvent.getAppName() + "; Call : " + evcacheEvent.getCall() + "; Keys : " + evcacheEvent.getEVCacheKeys() + "; completeCount : " + completeCount + "; totalFutureCount : " + totalFutureCount +"; failureCount : " + failureCount);
}
if(totalFutureCount == completeCount) {
final List<Tag> tags = new ArrayList<Tag>(5);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, appName);
if(evcacheEvent != null) tags.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, evcacheEvent.getCall().name()));
tags.add(new BasicTag(EVCacheMetricsFactory.FAIL_COUNT, String.valueOf(failureCount)));
tags.add(new BasicTag(EVCacheMetricsFactory.COMPLETE_COUNT, String.valueOf(completeCount)));
if(failReason != null) tags.add(new BasicTag(EVCacheMetricsFactory.IPC_STATUS, failReason));
//tags.add(new BasicTag(EVCacheMetricsFactory.OPERATION, EVCacheMetricsFactory.CALLBACK));
EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_LATCH, tags, Duration.ofMillis(EVCacheConfig.getInstance().getPropertyRepository().get(getAppName() + ".max.write.duration.metric", Integer.class)
.orElseGet("evcache.max.write.duration.metric").orElse(50).get().intValue())).record(System.currentTimeMillis()- start, TimeUnit.MILLISECONDS);
}
if (log.isDebugEnabled()) log.debug("END : onComplete - Calling Countdown. Completed Future = " + future + "; App : " + appName);
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getFailureCount()
*/
@Override
public int getFailureCount() {
int fail = 0;
for (Future<Boolean> future : futures) {
try {
if (future.isDone() && future.get().equals(Boolean.FALSE)) {
fail++;
}
} catch (Exception e) {
fail++;
log.error(e.getMessage(), e);
}
}
return fail;
}
/*
* (non-Javadoc)
*
* @see
* com.netflix.evcache.operation.EVCacheLatchI#getExpectedCompleteCount()
*/
@Override
public int getExpectedCompleteCount() {
return this.expectedCompleteCount;
}
/*
* (non-Javadoc)
*
* @see
* com.netflix.evcache.operation.EVCacheLatchI#getExpectedSuccessCount()
*/
@Override
public int getExpectedSuccessCount() {
return this.expectedCompleteCount;
}
/*
* (non-Javadoc)
*
* @see com.netflix.evcache.operation.EVCacheLatchI#getSuccessCount()
*/
@Override
public int getSuccessCount() {
int success = 0;
for (Future<Boolean> future : futures) {
try {
if (future.isDone() && future.get().equals(Boolean.TRUE)) {
success++;
}
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
return success;
}
public String getAppName() {
return appName;
}
public Policy getPolicy() {
return this.policy;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("{\"AppName\":\"");
builder.append(getAppName());
builder.append("\",\"isDone\":\"");
builder.append(isDone());
builder.append("\",\"Pending Count\":\"");
builder.append(getPendingCount());
builder.append("\",\"Completed Count\":\"");
builder.append(getCompletedCount());
builder.append("\",\"Pending Futures\":\"");
builder.append(getPendingFutures());
builder.append("\",\"All Futures\":\"");
builder.append(getAllFutures());
builder.append("\",\"Completed Futures\":\"");
builder.append(getCompletedFutures());
builder.append("\",\"Failure Count\":\"");
builder.append(getFailureCount());
builder.append("\",\"Success Count\":\"");
builder.append(getSuccessCount());
builder.append("\",\"Excpected Success Count\":\"");
builder.append(getExpectedSuccessCount());
builder.append("\"}");
return builder.toString();
}
@Override
public int getPendingFutureCount() {
int count = 0;
for (Future<Boolean> future : futures) {
if (!future.isDone()) {
count++;
}
}
return count;
}
@Override
public int getCompletedFutureCount() {
int count = 0;
for (Future<Boolean> future : futures) {
if (future.isDone()) {
count++;
}
}
return count;
}
public boolean isFastFailure() {
return (totalFutureCount == 0);
}
@SuppressWarnings("unchecked")
@Override
public void run() {
if(evcacheEvent != null) {
int failCount = 0, completeCount = 0;
for (Future<Boolean> future : futures) {
boolean fail = false;
try {
if(future.isDone()) {
fail = future.get(0, TimeUnit.MILLISECONDS).equals(Boolean.FALSE);
} else {
long delayms = 0;
if(scheduledFuture != null) {
delayms = scheduledFuture.getDelay(TimeUnit.MILLISECONDS);
}
if(delayms < 0 ) delayms = 0;//making sure wait is not negative. It might be ok but as this is implementation dependent let us stick with 0
fail = future.get(delayms, TimeUnit.MILLISECONDS).equals(Boolean.FALSE);
}
} catch (Exception e) {
fail = true;
if(log.isDebugEnabled()) log.debug(e.getMessage(), e);
}
if (fail) {
if(future instanceof EVCacheOperationFuture) {
final EVCacheOperationFuture<Boolean> evcFuture = (EVCacheOperationFuture<Boolean>)future;
final StatusCode code = evcFuture.getStatus().getStatusCode();
if(code != StatusCode.SUCCESS && code != StatusCode.ERR_NOT_FOUND && code != StatusCode.ERR_EXISTS) {
List<ServerGroup> listOfFailedServerGroups = (List<ServerGroup>) evcacheEvent.getAttribute("FailedServerGroups");
if(listOfFailedServerGroups == null) {
listOfFailedServerGroups = new ArrayList<ServerGroup>(failCount);
evcacheEvent.setAttribute("FailedServerGroups", listOfFailedServerGroups);
}
listOfFailedServerGroups.add(evcFuture.getServerGroup());
failCount++;
}
} else {
failCount++;
}
} else {
completeCount++;
}
}
if(log.isDebugEnabled()) log.debug("Fail Count : " + failCount);
if(failCount > 0) {
if(evcacheEvent.getClients().size() > 0) {
for(EVCacheClient client : evcacheEvent.getClients()) {
final List<EVCacheEventListener> evcacheEventListenerList = client.getPool().getEVCacheClientPoolManager().getEVCacheEventListeners();
if(log.isDebugEnabled()) log.debug("\nClient : " + client +"\nEvcacheEventListenerList : " + evcacheEventListenerList);
for (EVCacheEventListener evcacheEventListener : evcacheEventListenerList) {
evcacheEventListener.onError(evcacheEvent, null);
}
break;
}
}
}
final List<Tag> tags = new ArrayList<Tag>(5);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, appName);
if(evcacheEvent != null) tags.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, evcacheEvent.getCall().name()));
//tags.add(new BasicTag(EVCacheMetricsFactory.OPERATION, EVCacheMetricsFactory.VERIFY));
tags.add(new BasicTag(EVCacheMetricsFactory.FAIL_COUNT, String.valueOf(failCount)));
tags.add(new BasicTag(EVCacheMetricsFactory.COMPLETE_COUNT, String.valueOf(completeCount)));
EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.INTERNAL_LATCH_VERIFY, tags);
}
}
@Override
public int hashCode() {
return ((evcacheEvent == null) ? 0 : evcacheEvent.hashCode());
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EVCacheLatchImpl other = (EVCacheLatchImpl) obj;
if (appName == null) {
if (other.appName != null)
return false;
} else if (!appName.equals(other.appName))
return false;
if (evcacheEvent == null) {
if (other.evcacheEvent != null)
return false;
} else if (!evcacheEvent.equals(other.evcacheEvent))
return false;
return true;
}
public void setScheduledFuture(ScheduledFuture<?> scheduledFuture) {
this.scheduledFuture = scheduledFuture;
}
public void scheduledFutureValidation() {
if(evcacheEvent != null) {
final EVCacheClientPool pool = evcacheEvent.getEVCacheClientPool();
final ScheduledFuture<?> scheduledFuture = pool.getEVCacheClientPoolManager().getEVCacheScheduledExecutor().schedule(this, pool.getOperationTimeout().get(), TimeUnit.MILLISECONDS);
setScheduledFuture(scheduledFuture);
} else {
if(log.isWarnEnabled()) log.warn("Future cannot be scheduled as EVCacheEvent is null!");
}
}
}
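// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a hedged example of
// the waiting pattern this latch supports. In real use the latch comes back
// from an EVCache write call; it is only awaited here.
// ---------------------------------------------------------------------------
class EVCacheLatchUsageSketch {
    static boolean quorumSucceeded(EVCacheLatchImpl latch) throws InterruptedException {
        // Block until the policy-determined number of futures complete, or 50 ms elapse.
        final boolean completed = latch.await(50, TimeUnit.MILLISECONDS);
        // Even a released latch may include failed writes; check those as well.
        return completed && latch.getFailureCount() == 0;
    }
}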
| 773 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheItemMetaData.java
|
package com.netflix.evcache.operation;
/**
* <B><u>Meta </u></B>
* <br>
* The meta debug command is a human readable dump of all available internal
* metadata of an item, minus the value.<br>
* <br>
 * <b><i>me <key>\r\n</i></b><br>
* <br>
* <key> means one key string.<br>
* <br>
* The response looks like:<br>
* <br>
* <b><i>ME <key> <k>=<v>*\r\nEN\r\n</i></b><br>
* <br>
* For Ex: <br>
* <pre>
*
me img:bil:360465414627441161
ME img:bil:360465414627441161 exp=-549784 la=55016 cas=0 fetch=yes cls=5 size=237
EN
</pre>
* <br>
* Each of the keys and values are the internal data for the item.<br>
* <br>
* exp = expiration time<br>
* la = time in seconds since last access<br>
* cas = CAS ID<br>
* fetch = whether an item has been fetched before<br>
* cls = slab class id<br>
* size = total size in bytes<br>
* <br>
* @author smadappa
*
*/
public class EVCacheItemMetaData {
public long secondsLeftToExpire;
public long secondsSinceLastAccess;
public long cas;
public boolean hasBeenFetchedAfterWrite;
public int slabClass;
public int sizeInBytes;
public EVCacheItemMetaData() {
super();
}
public void setSecondsLeftToExpire(long secondsLeftToExpire) {
this.secondsLeftToExpire = secondsLeftToExpire;
}
public void setSecondsSinceLastAccess(long secondsSinceLastAccess) {
this.secondsSinceLastAccess = secondsSinceLastAccess;
}
public void setCas(long cas) {
this.cas = cas;
}
public void setHasBeenFetchedAfterWrite(boolean hasBeenFetchedAfterWrite) {
this.hasBeenFetchedAfterWrite = hasBeenFetchedAfterWrite;
}
public void setSlabClass(int slabClass) {
this.slabClass = slabClass;
}
public void setSizeInBytes(int sizeInBytes) {
this.sizeInBytes = sizeInBytes;
}
public long getSecondsLeftToExpire() {
return secondsLeftToExpire;
}
public long getSecondsSinceLastAccess() {
return secondsSinceLastAccess;
}
public long getCas() {
return cas;
}
public boolean isHasBeenFetchedAfterWrite() {
return hasBeenFetchedAfterWrite;
}
public int getSlabClass() {
return slabClass;
}
public int getSizeInBytes() {
return sizeInBytes;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (int) (cas ^ (cas >>> 32));
result = prime * result + (hasBeenFetchedAfterWrite ? 1231 : 1237);
result = prime * result + (int) (secondsLeftToExpire ^ (secondsLeftToExpire >>> 32));
result = prime * result + (int) (secondsSinceLastAccess ^ (secondsSinceLastAccess >>> 32));
result = prime * result + sizeInBytes;
result = prime * result + slabClass;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EVCacheItemMetaData other = (EVCacheItemMetaData) obj;
if (cas != other.cas)
return false;
if (hasBeenFetchedAfterWrite != other.hasBeenFetchedAfterWrite)
return false;
if (secondsLeftToExpire != other.secondsLeftToExpire)
return false;
if (secondsSinceLastAccess != other.secondsSinceLastAccess)
return false;
if (sizeInBytes != other.sizeInBytes)
return false;
if (slabClass != other.slabClass)
return false;
return true;
}
@Override
public String toString() {
return "EVCacheItemMetaData [secondsLeftToExpire=" + secondsLeftToExpire + ", secondsSinceLastAccess="
+ secondsSinceLastAccess + ", cas=" + cas + ", hasBeenFetchedAfterWrite=" + hasBeenFetchedAfterWrite
+ ", slabClass=" + slabClass + ", sizeInBytes=" + sizeInBytes + "]";
}
}
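// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a minimal parser
// showing how the "me <key>" debug response documented above maps onto
// EVCacheItemMetaData. The input line format is assumed from the javadoc
// example ("ME <key> exp=... la=... cas=... fetch=... cls=... size=...").
// ---------------------------------------------------------------------------
class EVCacheItemMetaDataParserSketch {
    static EVCacheItemMetaData parse(String responseLine) {
        final EVCacheItemMetaData meta = new EVCacheItemMetaData();
        for (String token : responseLine.split(" ")) {
            final int eq = token.indexOf('=');
            if (eq < 0) continue; // skips the "ME" prefix and the key itself
            final String k = token.substring(0, eq);
            final String v = token.substring(eq + 1);
            if ("exp".equals(k)) meta.setSecondsLeftToExpire(Long.parseLong(v));
            else if ("la".equals(k)) meta.setSecondsSinceLastAccess(Long.parseLong(v));
            else if ("cas".equals(k)) meta.setCas(Long.parseLong(v));
            else if ("fetch".equals(k)) meta.setHasBeenFetchedAfterWrite("yes".equals(v));
            else if ("cls".equals(k)) meta.setSlabClass(Integer.parseInt(v));
            else if ("size".equals(k)) meta.setSizeInBytes(Integer.parseInt(v));
        }
        return meta;
    }
}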
| 774 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/operation/EVCacheFutures.java
|
package com.netflix.evcache.operation;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.evcache.EVCacheLatch;
import com.netflix.evcache.pool.ServerGroup;
import net.spy.memcached.internal.ListenableFuture;
import net.spy.memcached.internal.OperationCompletionListener;
import net.spy.memcached.internal.OperationFuture;
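/**
 * Aggregates the per-replica OperationFutures for a single key so callers can
 * treat a fan-out write as one Future: get() is true only when every
 * underlying future succeeded, and the attached EVCacheLatch is notified once,
 * when the last future completes.
 */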
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "DE_MIGHT_IGNORE", "EI_EXPOSE_REP2" })
public class EVCacheFutures implements ListenableFuture<Boolean, OperationCompletionListener>,
OperationCompletionListener {
private static final Logger log = LoggerFactory.getLogger(EVCacheFutures.class);
private final OperationFuture<Boolean>[] futures;
private final String app;
private final ServerGroup serverGroup;
private final String key;
private final AtomicInteger completionCounter;
private final EVCacheLatch latch;
public EVCacheFutures(OperationFuture<Boolean>[] futures, String key, String app, ServerGroup serverGroup, EVCacheLatch latch) {
this.futures = futures;
this.app = app;
this.serverGroup = serverGroup;
this.key = key;
this.latch = latch;
this.completionCounter = new AtomicInteger(futures.length);
if (latch != null && latch instanceof EVCacheLatchImpl) ((EVCacheLatchImpl) latch).addFuture(this);
for (int i = 0; i < futures.length; i++) {
final OperationFuture<Boolean> of = futures[i];
if (of.isDone()) {
try {
onComplete(of);
} catch (Exception e) {
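                    // deliberately swallowed; see the DE_MIGHT_IGNORE suppression on this class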
}
} else {
of.addListener(this);
}
}
}
public boolean cancel(boolean mayInterruptIfRunning) {
if(log.isDebugEnabled()) log.debug("Operation cancelled", new Exception());
for (OperationFuture<Boolean> future : futures) {
future.cancel();
}
return true;
}
@Override
public boolean isCancelled() {
for (OperationFuture<Boolean> future : futures) {
if (future.isCancelled() == false) return false;
}
return true;
}
@Override
public boolean isDone() {
for (OperationFuture<Boolean> future : futures) {
if (future.isDone() == false) return false;
}
return true;
}
@Override
public Boolean get() throws InterruptedException, ExecutionException {
for (OperationFuture<Boolean> future : futures) {
if (future.get() == false) return false;
}
return true;
}
@Override
public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
for (OperationFuture<Boolean> future : futures) {
if (future.get(timeout, unit) == false) return false;
}
return true;
}
public String getKey() {
return key;
}
public String getApp() {
return app;
}
public String getZone() {
return serverGroup.getZone();
}
public String getServerGroupName() {
return serverGroup.getName();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("EVCacheFutures [futures=[");
for (OperationFuture<Boolean> future : futures)
sb.append(future);
sb.append("], app=").append(app).append(", ServerGroup=").append(serverGroup.toString()).append("]");
return sb.toString();
}
@Override
public void onComplete(OperationFuture<?> future) throws Exception {
int val = completionCounter.decrementAndGet();
if (val == 0) {
if (latch != null) latch.onComplete(future);// Pass the last future to get completed
}
}
@Override
public Future<Boolean> addListener(OperationCompletionListener listener) {
return this;
}
@Override
public Future<Boolean> removeListener(OperationCompletionListener listener) {
return this;
}
}
| 775 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/metrics/EVCacheMetricsFactory.java
|
package com.netflix.evcache.metrics;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DistributionSummary;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.api.Tag;
import com.netflix.spectator.api.Timer;
import com.netflix.spectator.api.histogram.PercentileTimer;
import com.netflix.spectator.ipc.IpcStatus;
import net.spy.memcached.ops.StatusCode;
@SuppressWarnings("deprecation")
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings(value = { "NF_LOCAL_FAST_PROPERTY",
"PMB_POSSIBLE_MEMORY_BLOAT" }, justification = "Creates only when needed")
public final class EVCacheMetricsFactory {
private final Map<String, Number> monitorMap = new ConcurrentHashMap<String, Number>();
private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Map<String, DistributionSummary> distributionSummaryMap = new ConcurrentHashMap<String, DistributionSummary>();
private final Lock writeLock = (new ReentrantReadWriteLock()).writeLock();
    private final Map<String, Timer> timerMap = new ConcurrentHashMap<String, Timer>(); // concurrent: read below without holding writeLock
private static final EVCacheMetricsFactory INSTANCE = new EVCacheMetricsFactory();
private EVCacheMetricsFactory() {
}
public static EVCacheMetricsFactory getInstance() {
return INSTANCE;
}
public Map<String, Counter> getAllCounters() {
return counterMap;
}
public Map<String, Timer> getAllTimers() {
return timerMap;
}
public Map<String, Number> getAllMonitor() {
return monitorMap;
}
public Map<String, DistributionSummary> getAllDistributionSummaryMap() {
return distributionSummaryMap;
}
public Registry getRegistry() {
return Spectator.globalRegistry();
}
public AtomicLong getLongGauge(String name) {
return getLongGauge(name, null);
}
public AtomicLong getLongGauge(String cName, Collection<Tag> tags) {
final String name = tags != null ? cName + tags.toString() : cName;
AtomicLong gauge = (AtomicLong)monitorMap.get(name);
if (gauge == null) {
writeLock.lock();
try {
if (monitorMap.containsKey(name)) {
gauge = (AtomicLong)monitorMap.get(name);
} else {
if(tags != null) {
final Id id = getId(cName, tags);
gauge = getRegistry().gauge(id, new AtomicLong());
} else {
final Id id = getId(cName, null);
gauge = getRegistry().gauge(id, new AtomicLong());
}
monitorMap.put(name, gauge);
}
} finally {
writeLock.unlock();
}
}
return gauge;
}
private void addCommonTags(List<Tag> tagList) {
tagList.add(new BasicTag(OWNER, "evcache"));
final String additionalTags = EVCacheConfig.getInstance().getPropertyRepository().get("evcache.additional.tags", String.class).orElse(null).get();
if(additionalTags != null && additionalTags.length() > 0) {
final StringTokenizer st = new StringTokenizer(additionalTags, ",");
while(st.hasMoreTokens()) {
final String token = st.nextToken().trim();
String val = System.getProperty(token);
if(val == null) val = System.getenv(token);
if(val != null) tagList.add(new BasicTag(token, val));
}
}
}
public void addAppNameTags(List<Tag> tagList, String appName) {
tagList.add(new BasicTag(EVCacheMetricsFactory.CACHE, appName));
tagList.add(new BasicTag(EVCacheMetricsFactory.ID, appName));
}
public Id getId(String name, Collection<Tag> tags) {
final List<Tag> tagList = new ArrayList<Tag>();
if(tags != null) tagList.addAll(tags);
addCommonTags(tagList);
return getRegistry().createId(name, tagList);
}
public Counter getCounter(String cName, Collection<Tag> tags) {
final String name = tags != null ? cName + tags.toString() : cName;
Counter counter = counterMap.get(name);
if (counter == null) {
writeLock.lock();
try {
if (counterMap.containsKey(name)) {
counter = counterMap.get(name);
} else {
                    final List<Tag> tagList = new ArrayList<Tag>((tags != null ? tags.size() : 0) + 1);
                    if (tags != null) tagList.addAll(tags); // getCounter(String) passes null tags
final Id id = getId(cName, tagList);
counter = getRegistry().counter(id);
counterMap.put(name, counter);
}
} finally {
writeLock.unlock();
}
}
return counter;
}
public Counter getCounter(String name) {
return getCounter(name, null);
}
public void increment(String name) {
final Counter counter = getCounter(name);
counter.increment();
}
public void increment(String cName, Collection<Tag> tags) {
final Counter counter = getCounter(cName, tags);
counter.increment();
}
@Deprecated
public Timer getPercentileTimer(String metric, Collection<Tag> tags) {
return getPercentileTimer(metric, tags, Duration.ofMillis(100));
}
public Timer getPercentileTimer(String metric, Collection<Tag> tags, Duration max) {
final String name = tags != null ? metric + tags.toString() : metric;
final Timer duration = timerMap.get(name);
if (duration != null) return duration;
writeLock.lock();
try {
if (timerMap.containsKey(name))
return timerMap.get(name);
else {
Id id = getId(metric, tags);
final Timer _duration = PercentileTimer.builder(getRegistry()).withId(id).withRange(Duration.ofNanos(100000), max).build();
timerMap.put(name, _duration);
return _duration;
}
} finally {
writeLock.unlock();
}
}
public DistributionSummary getDistributionSummary(String name, Collection<Tag> tags) {
final String metricName = (tags != null ) ? name + tags.toString() : name;
final DistributionSummary _ds = distributionSummaryMap.get(metricName);
if(_ds != null) return _ds;
final Registry registry = Spectator.globalRegistry();
if (registry != null) {
Id id = getId(name, tags);
final DistributionSummary ds = registry.distributionSummary(id);
distributionSummaryMap.put(metricName, ds);
return ds;
}
return null;
}
public String getStatusCode(StatusCode sc) {
switch(sc) {
case CANCELLED :
return IpcStatus.cancelled.name();
case TIMEDOUT :
return IpcStatus.timeout.name();
case INTERRUPTED :
return EVCacheMetricsFactory.INTERRUPTED;
case SUCCESS :
return IpcStatus.success.name();
case ERR_NOT_FOUND:
return "not_found";
case ERR_EXISTS:
return "exists";
case ERR_2BIG:
return "too_big";
case ERR_INVAL:
return "invalid";
case ERR_NOT_STORED:
return "not_stored";
case ERR_DELTA_BADVAL:
return "bad_value";
case ERR_NOT_MY_VBUCKET:
return "not_my_vbucket";
case ERR_UNKNOWN_COMMAND:
return "unknown_command";
case ERR_NO_MEM:
return "no_mem";
case ERR_NOT_SUPPORTED:
return "not_supported";
case ERR_INTERNAL:
return "error_internal";
case ERR_BUSY:
return "error_busy";
case ERR_TEMP_FAIL:
return "temp_failure";
case ERR_CLIENT :
return "error_client";
default :
return sc.name().toLowerCase();
}
}
/**
* External Metric Names
*/
public static final String OVERALL_CALL = "evcache.client.call";
public static final String OVERALL_KEYS_SIZE = "evcache.client.call.keys.size";
public static final String COMPRESSION_RATIO = "evcache.client.compression.ratio";
/**
* External IPC Metric Names
*/
public static final String IPC_CALL = "ipc.client.call";
public static final String IPC_SIZE_INBOUND = "ipc.client.call.size.inbound";
public static final String IPC_SIZE_OUTBOUND = "ipc.client.call.size.outbound";
public static final String OWNER = "owner";
public static final String ID = "id";
/**
* Internal Metric Names
*/
public static final String CONFIG = "internal.evc.client.config";
public static final String DATA_SIZE = "internal.evc.client.datasize";
public static final String IN_MEMORY = "internal.evc.client.inmemorycache";
public static final String FAST_FAIL = "internal.evc.client.fastfail";
public static final String INTERNAL_OPERATION = "internal.evc.client.operation";
public static final String INTERNAL_PAUSE = "internal.evc.client.pause";
public static final String INTERNAL_LATCH = "internal.evc.client.latch";
public static final String INTERNAL_LATCH_VERIFY = "internal.evc.client.latch.verify";
public static final String INTERNAL_FAIL = "internal.evc.client.fail";
public static final String INTERNAL_EVENT_FAIL = "internal.evc.client.event.fail";
public static final String INTERNAL_RECONNECT = "internal.evc.client.reconnect";
public static final String INTERNAL_EXECUTOR = "internal.evc.client.executor";
public static final String INTERNAL_EXECUTOR_SCHEDULED = "internal.evc.client.scheduledExecutor";
public static final String INTERNAL_POOL_INIT_ERROR = "internal.evc.client.init.error";
public static final String INTERNAL_NUM_CHUNK_SIZE = "internal.evc.client.chunking.numOfChunks";
public static final String INTERNAL_CHUNK_DATA_SIZE = "internal.evc.client.chunking.dataSize";
public static final String INTERNAL_ADD_CALL_FIXUP = "internal.evc.client.addCall.fixUp";
public static final String INTERNAL_POOL_SG_CONFIG = "internal.evc.client.pool.asg.config";
public static final String INTERNAL_POOL_CONFIG = "internal.evc.client.pool.config";
public static final String INTERNAL_POOL_REFRESH = "internal.evc.client.pool.refresh";
public static final String INTERNAL_BOOTSTRAP_EUREKA = "internal.evc.client.pool.bootstrap.eureka";
public static final String INTERNAL_STATS = "internal.evc.client.stats";
public static final String INTERNAL_TTL = "internal.evc.item.ttl";
/*
* Internal pool config values
*/
public static final String POOL_READ_INSTANCES = "readInstances";
public static final String POOL_WRITE_INSTANCES = "writeInstances";
public static final String POOL_RECONCILE = "reconcile";
public static final String POOL_CHANGED = "asgChanged";
public static final String POOL_SERVERGROUP_STATUS = "asgStatus";
public static final String POOL_READ_Q_SIZE = "readQueue";
public static final String POOL_WRITE_Q_SIZE = "writeQueue";
public static final String POOL_REFRESH_QUEUE_FULL = "refreshOnQueueFull";
public static final String POOL_REFRESH_ASYNC = "refreshAsync";
public static final String POOL_OPERATIONS = "operations";
/**
* Metric Tags Names
*/
public static final String CACHE = "ipc.server.app";
public static final String SERVERGROUP = "ipc.server.asg";
public static final String ZONE = "ipc.server.zone";
public static final String ATTEMPT = "ipc.attempt";
public static final String IPC_RESULT = "ipc.result";
public static final String IPC_STATUS = "ipc.status";
//public static final String FAIL_REASON = "ipc.error.group";
/*
* Metric Tags moved to IPC format
*/
public static final String CALL_TAG = "evc.call";
public static final String CALL_TYPE_TAG = "evc.call.type";
public static final String CACHE_HIT = "evc.cache.hit";
public static final String CONNECTION_ID = "evc.connection.id";
public static final String TTL = "evc.ttl";
public static final String PAUSE_REASON = "evc.pause.reason";
public static final String LATCH = "evc.latch";
public static final String FAIL_COUNT = "evc.fail.count";
public static final String COMPLETE_COUNT = "evc.complete.count";
public static final String RECONNECT_COUNT = "evc.reconnect.count";
public static final String FETCH_AFTER_PAUSE = "evc.fetch.after.pause";
public static final String FAILED_SERVERGROUP = "evc.failed.asg";
public static final String CONFIG_NAME = "evc.config";
public static final String STAT_NAME = "evc.stat.name";
public static final String FAILED_HOST = "evc.failed.host";
public static final String OPERATION = "evc.operation";
public static final String OPERATION_STATUS = "evc.operation.status";
public static final String NUMBER_OF_ATTEMPTS = "evc.attempts";
public static final String NUMBER_OF_KEYS = "evc.keys.count";
public static final String METRIC = "evc.metric";
public static final String FAILURE_REASON = "evc.fail.reason";
public static final String PREFIX = "evc.prefix";
public static final String EVENT = "evc.event";
public static final String EVENT_STAGE = "evc.event.stage";
public static final String CONNECTION = "evc.connection.type";
public static final String TLS = "evc.connection.tls";
public static final String COMPRESSION_TYPE = "evc.compression.type";
/**
* Metric Tags Values
*/
public static final String SIZE = "size";
public static final String PORT = "port";
public static final String CONNECT = "connect";
public static final String DISCONNECT = "disconnect";
public static final String SUCCESS = "success";
public static final String FAIL = "failure";
public static final String TIMEOUT = "timeout";
public static final String CHECKED_OP_TIMEOUT = "CheckedOperationTimeout";
public static final String CANCELLED = "cancelled";
public static final String THROTTLED = "throttled";
public static final String ERROR = "error";
public static final String READ = "read";
public static final String WRITE = "write";
public static final String YES = "yes";
public static final String NO = "no";
public static final String PARTIAL = "partial";
public static final String UNKNOWN = "unknown";
public static final String INTERRUPTED = "interrupted";
public static final String SCHEDULE = "Scheduling";
public static final String GC = "gc";
public static final String NULL_CLIENT = "nullClient";
public static final String INVALID_TTL = "invalidTTL";
public static final String NULL_ZONE = "nullZone";
public static final String NULL_SERVERGROUP = "nullASG";
public static final String RECONNECT = "reconnect";
public static final String CALLBACK = "callback";
public static final String VERIFY = "verify";
public static final String READ_QUEUE_FULL = "readQueueFull";
public static final String INACTIVE_NODE = "inactiveNode";
public static final String IGNORE_INACTIVE_NODES = "ignoreInactiveNode";
public static final String INCORRECT_CHUNKS = "incorrectNumOfChunks";
public static final String INVALID_CHUNK_SIZE = "invalidChunkSize";
public static final String CHECK_SUM_ERROR = "checkSumError";
public static final String KEY_HASH_COLLISION = "KeyHashCollision";
public static final String NUM_CHUNK_SIZE = "numOfChunks";
public static final String CHUNK_DATA_SIZE = "dataSize";
public static final String NOT_AVAILABLE = "notAvailable";
public static final String NOT_ACTIVE = "notActive";
public static final String WRONG_KEY_RETURNED = "wrongKeyReturned";
public static final String INITIAL = "initial";
public static final String SECOND = "second";
public static final String THIRD_UP = "third_up";
/**
* Metric Tag Value for Operations
*/
public static final String BULK_OPERATION = "BULK";
public static final String GET_OPERATION = "GET";
public static final String GET_AND_TOUCH_OPERATION = "GET_AND_TOUCH";
public static final String DELETE_OPERATION = "DELETE";
public static final String TOUCH_OPERATION = "TOUCH";
public static final String AOA_OPERATION = "APPEND_OR_ADD";
public static final String AOA_OPERATION_APPEND = "APPEND_OR_ADD-APPEND";
public static final String AOA_OPERATION_ADD = "APPEND_OR_ADD-ADD";
public static final String AOA_OPERATION_REAPPEND = "APPEND_OR_ADD-RETRY-APPEND";
public static final String SET_OPERATION = "SET";
public static final String ADD_OPERATION = "ADD";
public static final String REPLACE_OPERATION = "REPLACE";
public static final String META_GET_OPERATION = "M_GET";
public static final String META_SET_OPERATION = "M_SET";
public static final String META_DEBUG_OPERATION = "M_DEBUG";
}
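// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a hedged example of
// the tagging convention above, recording one GET latency against the external
// call timer. Tag names come from the constants in this class; the 100 ms max
// range mirrors the deprecated overload's default.
// ---------------------------------------------------------------------------
class EVCacheMetricsUsageSketch {
    static void recordGetLatency(String appName, long elapsedMillis) {
        final List<Tag> tags = new ArrayList<Tag>(3);
        EVCacheMetricsFactory.getInstance().addAppNameTags(tags, appName);
        tags.add(new BasicTag(EVCacheMetricsFactory.CALL_TAG, EVCacheMetricsFactory.GET_OPERATION));
        EVCacheMetricsFactory.getInstance()
                .getPercentileTimer(EVCacheMetricsFactory.OVERALL_CALL, tags, Duration.ofMillis(100))
                .record(elapsedMillis, java.util.concurrent.TimeUnit.MILLISECONDS);
    }
}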
| 776 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/BaseConnectionFactory.java
|
package com.netflix.evcache.connection;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheTranscoder;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheNodeLocator;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.BinaryConnectionFactory;
import net.spy.memcached.ConnectionObserver;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.EVCacheConnection;
import net.spy.memcached.FailureMode;
import net.spy.memcached.HashAlgorithm;
import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.protocol.binary.EVCacheNodeImpl;
import net.spy.memcached.transcoders.Transcoder;
public class BaseConnectionFactory extends BinaryConnectionFactory {
protected final String name;
protected final String appName;
protected final Property<Integer> operationTimeout;
protected final long opMaxBlockTime;
protected EVCacheNodeLocator locator;
protected final long startTime;
protected final EVCacheClient client;
protected final Property<String> failureMode;
BaseConnectionFactory(EVCacheClient client, int len, Property<Integer> _operationTimeout, long opMaxBlockTime) {
super(len, BinaryConnectionFactory.DEFAULT_READ_BUFFER_SIZE, DefaultHashAlgorithm.KETAMA_HASH);
this.opMaxBlockTime = opMaxBlockTime;
this.operationTimeout = _operationTimeout;
this.client = client;
this.startTime = System.currentTimeMillis();
this.appName = client.getAppName();
this.failureMode = client.getPool().getEVCacheClientPoolManager().getEVCacheConfig().getPropertyRepository().get(this.client.getServerGroupName() + ".failure.mode", String.class).orElseGet(appName + ".failure.mode").orElse("Retry");
this.name = appName + "-" + client.getServerGroupName() + "-" + client.getId();
}
public NodeLocator createLocator(List<MemcachedNode> list) {
this.locator = new EVCacheNodeLocator(client, list,
DefaultHashAlgorithm.KETAMA_HASH, new EVCacheKetamaNodeLocatorConfiguration(client));
return locator;
}
public EVCacheNodeLocator getEVCacheNodeLocator() {
return this.locator;
}
public long getMaxReconnectDelay() {
return super.getMaxReconnectDelay();
}
public int getOpQueueLen() {
return super.getOpQueueLen();
}
public int getReadBufSize() {
return super.getReadBufSize();
}
public BlockingQueue<Operation> createOperationQueue() {
return new ArrayBlockingQueue<Operation>(getOpQueueLen());
}
public MemcachedConnection createConnection(List<InetSocketAddress> addrs) throws IOException {
return new EVCacheConnection(name, getReadBufSize(), this, addrs, getInitialObservers(), getFailureMode(),
getOperationFactory());
}
public MemcachedNode createMemcachedNode(SocketAddress sa, SocketChannel c, int bufSize) {
boolean doAuth = false;
final EVCacheNodeImpl node = new EVCacheNodeImpl(sa, c, bufSize, createReadOperationQueue(),
createWriteOperationQueue(), createOperationQueue(),
opMaxBlockTime, doAuth, getOperationTimeout(), getAuthWaitTime(), this, client,
startTime);
node.registerMonitors();
return node;
}
public long getOperationTimeout() {
return operationTimeout.get();
}
public BlockingQueue<Operation> createReadOperationQueue() {
return super.createReadOperationQueue();
}
public BlockingQueue<Operation> createWriteOperationQueue() {
return super.createWriteOperationQueue();
}
public Transcoder<Object> getDefaultTranscoder() {
return new EVCacheTranscoder();
}
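    // Parses the "<serverGroup>.failure.mode" / "<app>.failure.mode" property;
    // unrecognized values fall back to FailureMode.Cancel instead of failing
    // connection setup.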
public FailureMode getFailureMode() {
try {
return FailureMode.valueOf(failureMode.get());
} catch (IllegalArgumentException ex) {
return FailureMode.Cancel;
}
}
public HashAlgorithm getHashAlg() {
return super.getHashAlg();
}
public Collection<ConnectionObserver> getInitialObservers() {
return super.getInitialObservers();
}
public boolean isDaemon() {
return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.thread.daemon", Boolean.class).orElse(super.isDaemon()).get();
}
public boolean shouldOptimize() {
return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.broadcast.base.connection.optimize", Boolean.class).orElse(true).get();
}
public boolean isDefaultExecutorService() {
return false;
}
public ExecutorService getListenerExecutorService() {
return client.getPool().getEVCacheClientPoolManager().getEVCacheExecutor();
}
public int getId() {
return client.getId();
}
public String getZone() {
return client.getServerGroup().getZone();
}
public String getServerGroupName() {
return client.getServerGroup().getName();
}
public String getReplicaSetName() {
return client.getServerGroup().getName();
}
public String getAppName() {
return this.appName;
}
public String toString() {
return name;
}
public EVCacheClientPoolManager getEVCacheClientPoolManager() {
return this.client.getPool().getEVCacheClientPoolManager();
}
public EVCacheClientPool getEVCacheClientPool() {
return this.client.getPool();
}
public EVCacheClient getEVCacheClient() {
return this.client;
}
}
| 777 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/IConnectionBuilder.java
|
package com.netflix.evcache.connection;
import com.netflix.evcache.pool.EVCacheClient;
import net.spy.memcached.ConnectionFactory;
public interface IConnectionBuilder {
ConnectionFactory getConnectionFactory(EVCacheClient client);
}
| 778 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/BaseAsciiConnectionFactory.java
|
package com.netflix.evcache.connection;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheTranscoder;
import com.netflix.evcache.operation.EVCacheAsciiOperationFactory;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.pool.EVCacheClientPool;
import com.netflix.evcache.pool.EVCacheClientPoolManager;
import com.netflix.evcache.pool.EVCacheKetamaNodeLocatorConfiguration;
import com.netflix.evcache.pool.EVCacheNodeLocator;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.ConnectionObserver;
import net.spy.memcached.DefaultConnectionFactory;
import net.spy.memcached.DefaultHashAlgorithm;
import net.spy.memcached.EVCacheConnection;
import net.spy.memcached.FailureMode;
import net.spy.memcached.HashAlgorithm;
import net.spy.memcached.MemcachedConnection;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.NodeLocator;
import net.spy.memcached.OperationFactory;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.protocol.ascii.AsciiOperationFactory;
import net.spy.memcached.protocol.ascii.EVCacheAsciiNodeImpl;
import net.spy.memcached.transcoders.Transcoder;
public class BaseAsciiConnectionFactory extends DefaultConnectionFactory {
protected final String name;
protected final String appName;
protected final Property<Integer> operationTimeout;
protected final long opMaxBlockTime;
protected EVCacheNodeLocator locator;
protected final long startTime;
protected final EVCacheClient client;
protected final Property<String> failureMode;
BaseAsciiConnectionFactory(EVCacheClient client, int len, Property<Integer> _operationTimeout, long opMaxBlockTime) {
super(len, DefaultConnectionFactory.DEFAULT_READ_BUFFER_SIZE, DefaultHashAlgorithm.KETAMA_HASH);
this.opMaxBlockTime = opMaxBlockTime;
this.operationTimeout = _operationTimeout;
this.client = client;
this.startTime = System.currentTimeMillis();
this.appName = client.getAppName();
this.failureMode = client.getPool().getEVCacheClientPoolManager().getEVCacheConfig().getPropertyRepository().get(this.client.getServerGroupName() + ".failure.mode", String.class).orElseGet(appName + ".failure.mode").orElse("Retry");
this.name = appName + "-" + client.getServerGroupName() + "-" + client.getId();
}
public NodeLocator createLocator(List<MemcachedNode> list) {
this.locator = new EVCacheNodeLocator(client, list, DefaultHashAlgorithm.KETAMA_HASH, new EVCacheKetamaNodeLocatorConfiguration(client));
return locator;
}
public EVCacheNodeLocator getEVCacheNodeLocator() {
return this.locator;
}
public long getMaxReconnectDelay() {
return super.getMaxReconnectDelay();
}
public int getOpQueueLen() {
return super.getOpQueueLen();
}
public int getReadBufSize() {
return super.getReadBufSize();
}
public BlockingQueue<Operation> createOperationQueue() {
return new ArrayBlockingQueue<Operation>(getOpQueueLen());
}
public MemcachedConnection createConnection(List<InetSocketAddress> addrs) throws IOException {
return new EVCacheConnection(name, getReadBufSize(), this, addrs, getInitialObservers(), getFailureMode(),
getOperationFactory());
}
public EVCacheAsciiOperationFactory getOperationFactory() {
return new EVCacheAsciiOperationFactory();
}
public MemcachedNode createMemcachedNode(SocketAddress sa, SocketChannel c, int bufSize) {
boolean doAuth = false;
final EVCacheAsciiNodeImpl node = new EVCacheAsciiNodeImpl(sa, c, bufSize, createReadOperationQueue(),
createWriteOperationQueue(), createOperationQueue(),
opMaxBlockTime, doAuth, getOperationTimeout(), getAuthWaitTime(), this, client,
startTime);
node.registerMonitors();
return node;
}
public long getOperationTimeout() {
return operationTimeout.get();
}
public BlockingQueue<Operation> createReadOperationQueue() {
return super.createReadOperationQueue();
}
public BlockingQueue<Operation> createWriteOperationQueue() {
return super.createWriteOperationQueue();
}
public Transcoder<Object> getDefaultTranscoder() {
return new EVCacheTranscoder();
}
public FailureMode getFailureMode() {
try {
return FailureMode.valueOf(failureMode.get());
} catch (IllegalArgumentException ex) {
return FailureMode.Cancel;
}
}
public HashAlgorithm getHashAlg() {
return super.getHashAlg();
}
public Collection<ConnectionObserver> getInitialObservers() {
return super.getInitialObservers();
}
public boolean isDaemon() {
return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.thread.daemon", Boolean.class).orElse(super.isDaemon()).get();
}
public boolean shouldOptimize() {
return EVCacheConfig.getInstance().getPropertyRepository().get("evcache.broadcast.ascii.connection.optimize", Boolean.class).orElse(true).get();
}
public boolean isDefaultExecutorService() {
return false;
}
public ExecutorService getListenerExecutorService() {
return client.getPool().getEVCacheClientPoolManager().getEVCacheExecutor();
}
public int getId() {
return client.getId();
}
public String getZone() {
return client.getServerGroup().getZone();
}
public String getServerGroupName() {
return client.getServerGroup().getName();
}
public String getReplicaSetName() {
return client.getServerGroup().getName();
}
public String getAppName() {
return this.appName;
}
public String toString() {
return name;
}
public EVCacheClientPoolManager getEVCacheClientPoolManager() {
return this.client.getPool().getEVCacheClientPoolManager();
}
public EVCacheClientPool getEVCacheClientPool() {
return this.client.getPool();
}
public EVCacheClient getEVCacheClient() {
return this.client;
}
}
| 779 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/connection/ConnectionFactoryBuilder.java
|
package com.netflix.evcache.connection;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.pool.EVCacheClient;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.ConnectionFactory;
public class ConnectionFactoryBuilder implements IConnectionBuilder {
public ConnectionFactoryBuilder() {
}
public ConnectionFactory getConnectionFactory(EVCacheClient client) {
final String appName = client.getAppName();
final int maxQueueSize = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".max.queue.length", Integer.class).orElse(16384).get();
final Property<Integer> operationTimeout = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".operation.timeout", Integer.class).orElse(2500);
final int opQueueMaxBlockTime = EVCacheConfig.getInstance().getPropertyRepository().get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10).get();
final boolean useBinary = EVCacheConfig.getInstance().getPropertyRepository().get("evcache.use.binary.protocol", Boolean.class).orElse(true).get();
if(useBinary) return new BaseConnectionFactory(client, maxQueueSize, operationTimeout, opQueueMaxBlockTime);
else return new BaseAsciiConnectionFactory(client, maxQueueSize, operationTimeout, opQueueMaxBlockTime);
}
}
| 780 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/EVCacheConfig.java
|
package com.netflix.evcache.util;
import java.lang.reflect.Type;
import java.util.function.Consumer;
import java.util.function.Function;
import javax.inject.Inject;
import com.netflix.archaius.DefaultPropertyFactory;
import com.netflix.archaius.api.Property;
import com.netflix.archaius.api.PropertyListener;
import com.netflix.archaius.api.PropertyRepository;
import com.netflix.archaius.api.config.CompositeConfig;
import com.netflix.archaius.config.DefaultCompositeConfig;
import com.netflix.archaius.config.DefaultSettableConfig;
import com.netflix.archaius.config.EnvironmentConfig;
import com.netflix.archaius.config.SystemConfig;
import com.netflix.evcache.config.EVCachePersistedProperties;
public class EVCacheConfig {
private static EVCacheConfig INSTANCE;
/**
* This is a hack; we should find a better way to do this
**/
private static PropertyRepository propertyRepository;
@Inject
public EVCacheConfig(PropertyRepository repository) {
PropertyRepository _propertyRepository = null;
if(repository == null) {
try {
final CompositeConfig applicationConfig = new DefaultCompositeConfig(true);
CompositeConfig remoteLayer = new DefaultCompositeConfig(true);
applicationConfig.addConfig("RUNTIME", new DefaultSettableConfig());
applicationConfig.addConfig("REMOTE", remoteLayer);
applicationConfig.addConfig("SYSTEM", SystemConfig.INSTANCE);
applicationConfig.addConfig("ENVIRONMENT", EnvironmentConfig.INSTANCE);
final EVCachePersistedProperties remote = new EVCachePersistedProperties();
remoteLayer.addConfig("remote-1", remote.getPollingDynamicConfig());
_propertyRepository = new DefaultPropertyFactory(applicationConfig);
} catch (Exception e) {
e.printStackTrace();
_propertyRepository = new DefaultPropertyFactory(new DefaultCompositeConfig());
}
} else {
_propertyRepository = repository;
}
propertyRepository = new EVCachePropertyRepository(_propertyRepository);
//propertyRepository = _propertyRepository;
INSTANCE = this;
}
private EVCacheConfig() {
this(null);
}
public static EVCacheConfig getInstance() {
if(INSTANCE == null) new EVCacheConfig(); // the constructor publishes itself by assigning INSTANCE
return INSTANCE;
}
public PropertyRepository getPropertyRepository() {
return propertyRepository;
}
public static void setPropertyRepository(PropertyRepository repository) {
propertyRepository = repository;
}
class EVCachePropertyRepository implements PropertyRepository {
private final PropertyRepository delegate;
EVCachePropertyRepository(PropertyRepository delegate) {
this.delegate = delegate;
}
@Override
public <T> Property<T> get(String key, Class<T> type) {
return new EVCacheProperty<T>(delegate.get(key, type));
}
@Override
public <T> Property<T> get(String key, Type type) {
return new EVCacheProperty<T>(delegate.get(key, type));
}
}
class EVCacheProperty<T> implements Property<T> {
private final Property<T> property;
EVCacheProperty(Property<T> prop) {
property = prop;
}
@Override
public T get() {
return property.get();
}
@Override
public String getKey() {
return property.getKey();
}
@Override
public void addListener(PropertyListener<T> listener) {
property.addListener(listener);
}
@Override
public void removeListener(PropertyListener<T> listener) {
property.removeListener(listener);
}
@Override
public Subscription onChange(Consumer<T> consumer) {
return property.onChange(consumer);
}
@Override
public Subscription subscribe(Consumer<T> consumer) {
return property.subscribe(consumer);
}
@Override
public Property<T> orElse(T defaultValue) {
return new EVCacheProperty<T>(property.orElse(defaultValue));
}
@Override
public Property<T> orElseGet(String key) {
return new EVCacheProperty<T>(property.orElseGet(key));
}
@Override
public <S> Property<S> map(Function<T, S> mapper) {
return property.map(mapper);
}
@Override
public String toString() {
return "EVCacheProperty [Key=" + getKey() + ",value="+get() + "]";
}
}
}
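// Hypothetical usage sketch (not part of the original source; the class name and
// property key below are illustrative): shows the typical lookup pattern used
// throughout the client -- fetch a typed dynamic property with a default.
class EVCacheConfigUsageSketch {
    public static void main(String[] args) {
        final int timeout = EVCacheConfig.getInstance().getPropertyRepository()
                .get("SAMPLE_APP.operation.timeout", Integer.class)
                .orElse(2500) // fallback when the property is not set
                .get();
        System.out.println("operation timeout (ms): " + timeout);
    }
}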
| 781 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/Pair.java
|
package com.netflix.evcache.util;
public class Pair<E1, E2> {
    private E1 first;
    private E2 second;
    public Pair(E1 first, E2 second) {
        this.first = first;
        this.second = second;
    }
    public E1 first() {
        return first;
    }
    public void setFirst(E1 first) {
        this.first = first;
    }
    public E2 second() {
        return second;
    }
    public void setSecond(E2 second) {
        this.second = second;
    }
}
| 782 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/RetryCount.java
|
package com.netflix.evcache.util;
public class RetryCount {
private int retryCount;
public RetryCount() {
retryCount = 1;
}
public void incr() {
retryCount++;
}
public int get(){
return retryCount;
}
}
| 783 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/KeyHasher.java
|
package com.netflix.evcache.util;
import java.util.Arrays;
import java.util.Base64;
import java.util.Base64.Encoder;
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.fzakaria.ascii85.Ascii85;
import com.google.common.base.Charsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.netflix.archaius.api.Property;
public class KeyHasher {
/**
* Memcached per-item metadata overhead:
*   40 bytes + key size + 12 bytes ('item_hdr')
* If client flags are present:
*   40 + key size + 4 (flags) + 12
* If CAS and client flags are present:
*   40 + key size + 4 (flags) + 8 (CAS) + 12
*/
public enum HashingAlgorithm {
murmur3,
adler32,
crc32,
sha1,
sha256,
siphash24,
md5,
NO_HASHING // useful for disabling hashing at client level, while Hashing is enabled at App level
}
public static HashingAlgorithm getHashingAlgorithmFromString(String algorithmStr) {
try {
if (null == algorithmStr || algorithmStr.isEmpty()) {
return null;
}
return HashingAlgorithm.valueOf(algorithmStr.toLowerCase());
} catch (IllegalArgumentException ex) {
// default to md5 in case of an unsupported algorithm
return HashingAlgorithm.md5;
}
}
private static final Logger log = LoggerFactory.getLogger(KeyHasher.class);
private static final Encoder encoder = Base64.getEncoder().withoutPadding();
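/*
* Pipeline overview (derived from the methods below): hash the key with the
* chosen algorithm, optionally truncate the digest to maxDigestBytes, then
* encode it -- Base64 without padding by default, or Ascii85 when baseEncoder
* is "ascii85". Only the Base64 form is further truncated to maxHashLength.
*/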
public static String getHashedKeyEncoded(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength) {
return getHashedKeyEncoded(key, hashingAlgorithm, maxDigestBytes, maxHashLength, null);
}
public static String getHashedKeyEncoded(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes, Integer maxHashLength, String baseEncoder) {
final long start = System.nanoTime();
byte[] digest = getHashedKey(key, hashingAlgorithm, maxDigestBytes);
if(log.isDebugEnabled()) {
final char[] HEX_ARRAY = "0123456789ABCDEF".toCharArray();
char[] hexChars = new char[digest.length * 2];
for (int j = 0; j < digest.length; j++) {
int v = digest[j] & 0xFF;
hexChars[j * 2] = HEX_ARRAY[v >>> 4];
hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
}
log.debug("Key : " + key +"; hex : " + new String(hexChars));
}
if(log.isDebugEnabled()) log.debug("Key : " + key +"; digest length : " + digest.length + "; byte Array contents : " + Arrays.toString(digest) );
String hKey = null;
if(baseEncoder != null && baseEncoder.equals("ascii85")) {
hKey = Ascii85.encode(digest);
if(log.isDebugEnabled()) log.debug("Key : " + key +"; Hashed & Ascii85 encoded key : " + hKey + "; Took " + (System.nanoTime() - start) + " nanos");
} else {
hKey = encoder.encodeToString(digest);
if (null != hKey && maxHashLength != null && maxHashLength > 0 && maxHashLength < hKey.length()) {
hKey = hKey.substring(0, maxHashLength);
}
if(log.isDebugEnabled()) log.debug("Key : " + key +"; Hashed & encoded key : " + hKey + "; Took " + (System.nanoTime() - start) + " nanos");
}
return hKey;
}
public static byte[] getHashedKeyInBytes(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes) {
final long start = System.nanoTime();
byte[] digest = getHashedKey(key, hashingAlgorithm, maxDigestBytes);
if(log.isDebugEnabled()) log.debug("Key : " + key +"; digest length : " + digest.length + "; byte Array contents : " + Arrays.toString(digest) + "; Took " + (System.nanoTime() - start) + " nanos");
return digest;
}
private static byte[] getHashedKey(String key, HashingAlgorithm hashingAlgorithm, Integer maxDigestBytes) {
HashFunction hf = null;
switch (hashingAlgorithm) {
case murmur3:
hf = Hashing.murmur3_128();
break;
case adler32:
hf = Hashing.adler32();
break;
case crc32:
hf = Hashing.crc32();
break;
case sha1:
hf = Hashing.sha1();
break;
case sha256:
hf = Hashing.sha256();
break;
case siphash24:
hf = Hashing.sipHash24();
break;
case md5:
default:
hf = Hashing.md5();
break;
}
final HashCode hc = hf.newHasher().putString(key, Charsets.UTF_8).hash();
final byte[] digest = hc.asBytes();
if (maxDigestBytes != null && maxDigestBytes > 0 && maxDigestBytes < digest.length) {
return Arrays.copyOfRange(digest, 0, maxDigestBytes);
}
return digest;
}
public static void main(String[] args) {
BasicConfigurator.resetConfiguration();
BasicConfigurator.configure(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} [%t] %p %c %x - %m%n")));
org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);
String key = "MAP_LT:721af5a5-3452-4b62-86fb-5f31ccde8d99_187978153X28X2787347X1601330156682";
System.out.println(getHashedKeyEncoded(key, HashingAlgorithm.murmur3, null, null));
}
}
| 784 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/EVCacheBulkDataDto.java
|
package com.netflix.evcache.util;
import com.netflix.evcache.EVCacheKey;
import java.util.List;
import java.util.Map;
public class EVCacheBulkDataDto<T> {
private Map<String, T> decanonicalR;
private List<EVCacheKey> evcKeys;
public EVCacheBulkDataDto(Map<String, T> decanonicalR, List<EVCacheKey> evcKeys) {
this.decanonicalR = decanonicalR;
this.evcKeys = evcKeys;
}
public Map<String, T> getDecanonicalR() {
return decanonicalR;
}
public List<EVCacheKey> getEvcKeys() {
return evcKeys;
}
public void setDecanonicalR(Map<String, T> decanonicalR) {
this.decanonicalR = decanonicalR;
}
public void setEvcKeys(List<EVCacheKey> evcKeys) {
this.evcKeys = evcKeys;
}
}
| 785 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/ServerGroupCircularIterator.java
|
package com.netflix.evcache.util;
import java.util.Iterator;
import java.util.Set;
import com.netflix.evcache.pool.ServerGroup;
/**
* A circular iterator over server groups. This ensures that all server groups
* receive an equal number of requests.
*
* @author smadappa
*/
public class ServerGroupCircularIterator {
private Entry<ServerGroup> entry;
private int size = 0;
/**
* Creates an instance of ServerGroupCircularIterator across all server groups.
*
* @param allReplicaSets
* Set of all available ReplicaSets.
*/
public ServerGroupCircularIterator(Set<ServerGroup> allReplicaSets) {
if (allReplicaSets == null || allReplicaSets.isEmpty()) return;
Entry<ServerGroup> pEntry = null;
for (Iterator<ServerGroup> itr = allReplicaSets.iterator(); itr.hasNext();) {
size++;
final ServerGroup rSet = itr.next();
final Entry<ServerGroup> newEntry = new Entry<ServerGroup>(rSet, pEntry);
if (entry == null) entry = newEntry;
pEntry = newEntry;
}
/*
* Connect the first and the last entry to form a circular list
*/
if (pEntry != null) {
entry.next = pEntry;
}
}
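/*
* Note: entries are prepended while the circular list is built, so traversal
* order is the reverse of the input set's iteration order; next() still
* visits every server group exactly once per cycle.
*/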
/**
* Returns the next server group which should get the request.
*
* @return - the next ServerGroup in the iterator. If there
*         are none then null is returned.
*/
public ServerGroup next() {
if (entry == null) return null;
entry = entry.next;
return entry.element;
}
/**
* Returns the next ReplicaSet excluding the given ReplicaSet which should
* get the request.
*
* @return - the next ReplicaSet in the iterator. If there are none then
* null is returned.
*/
public ServerGroup next(ServerGroup ignoreReplicaSet) {
if (entry == null) return null;
entry = entry.next;
if (entry.element.equals(ignoreReplicaSet)) {
return entry.next.element;
} else {
return entry.element;
}
}
public int getSize() {
return size;
}
/**
* The Entry keeps track of the current element and next element in the
* list.
*
* @author smadappa
*
* @param <E>
*/
static class Entry<E> {
private E element;
private Entry<E> next;
/**
* Creates an instance of Entry.
*/
Entry(E element, Entry<E> next) {
this.element = element;
this.next = next;
}
}
public String toString() {
    final StringBuilder current = new StringBuilder();
    if (entry != null) {
        // walk with a local cursor so toString() does not advance the iterator state
        final Entry<ServerGroup> startEntry = entry;
        Entry<ServerGroup> cursor = startEntry;
        current.append(cursor.element);
        while (!cursor.next.equals(startEntry)) {
            current.append(",").append(cursor.next.element);
            cursor = cursor.next;
        }
    }
    return "Server Group Iterator : { size=" + getSize() + "; Server Group=" + current.toString() + "}";
}
}
| 786 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/Sneaky.java
|
package com.netflix.evcache.util;
/**
* Sneaky can be used to sneakily throw checked exceptions without actually declaring them in your method's throws clause.
* This somewhat contentious ability should be used carefully, of course.
*/
public class Sneaky {
public static RuntimeException sneakyThrow(Throwable t) {
if ( t == null ) throw new NullPointerException("t");
Sneaky.<RuntimeException>sneakyThrow0(t);
return null;
}
@SuppressWarnings("unchecked")
private static <T extends Throwable> void sneakyThrow0(Throwable t) throws T {
throw (T)t;
}
}
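// Hypothetical usage sketch (not part of the original source; the class name is
// illustrative): rethrows a checked IOException from code that does not declare
// it, then observes it as the original, unwrapped exception.
class SneakyUsageSketch {
    public static void main(String[] args) {
        try {
            // would not compile without Sneaky: IOException is checked and undeclared here
            throw Sneaky.sneakyThrow(new java.io.IOException("boom"));
        } catch (Exception e) {
            System.out.println("caught: " + e); // caught: java.io.IOException: boom
        }
    }
}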
| 787 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/ZoneFallbackIterator.java
|
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.evcache.util;
import java.util.Iterator;
import java.util.Set;
/**
* A Zone Based fallback circular iterator. This ensures that during a fallback
* scenario the requests are spread out across all zones evenly.
*
* @author smadappa
*/
public class ZoneFallbackIterator {
private Entry<String> entry;
private int size = 0;
/**
* Creates an instance of ZoneFallbackIterator given all the zones.
*
* @param allZones
* Set of all available zones.
*/
public ZoneFallbackIterator(Set<String> allZones) {
if (allZones == null || allZones.size() == 0) return;
Entry<String> pEntry = null;
for (Iterator<String> itr = allZones.iterator(); itr.hasNext();) {
size++;
final String zone = itr.next();
final Entry<String> newEntry = new Entry<String>(zone, pEntry);
if (entry == null) entry = newEntry;
pEntry = newEntry;
}
/*
* Connect the first and the last entry to form a circular list
*/
if (pEntry != null) {
entry.next = pEntry;
}
}
/**
* Returns the next zone from the set which should get the request.
*
* @return - the next zone in the iterator. If there are none then null is
* returned.
*/
public String next() {
if (entry == null) return null;
entry = entry.next;
return entry.element;
}
/**
* Returns the next zone from the set excluding the given zone which should
* get the request.
*
* @return - the next zone in the iterator. If there are none then null is
* returned.
*/
public String next(String ignoreZone) {
if (entry == null) return null;
entry = entry.next;
if (entry.element.equals(ignoreZone)) {
return entry.next.element;
} else {
return entry.element;
}
}
public int getSize() {
return size;
}
/**
* The Entry keeps track of the current element and next element in the
* list.
*
* @author smadappa
*
* @param <E>
*/
static class Entry<E> {
private E element;
private Entry<E> next;
/**
* Creates an instance of Entry.
*/
Entry(E element, Entry<E> next) {
this.element = element;
this.next = next;
}
}
}
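// Hypothetical usage sketch (not part of the original source; zone names are
// illustrative): picking a fallback zone while skipping the zone that just
// failed a request.
class ZoneFallbackIteratorUsageSketch {
    public static void main(String[] args) {
        final ZoneFallbackIterator iterator = new ZoneFallbackIterator(
                new java.util.HashSet<>(java.util.Arrays.asList("us-east-1a", "us-east-1c")));
        final String fallback = iterator.next("us-east-1a"); // skips the failed zone
        System.out.println("fallback zone: " + fallback);    // prints us-east-1c
    }
}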
| 788 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/SupplierUtils.java
|
package com.netflix.evcache.util;
import java.util.concurrent.Callable;
import java.util.function.Supplier;
public final class SupplierUtils {
private SupplierUtils() {
}
public static <T> Supplier<T> wrap(Callable<T> callable) {
return () -> {
try {
return callable.call();
}
catch (RuntimeException e) {
throw e;
}
catch (Exception e) {
throw new RuntimeException(e);
}
};
}
}
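// Hypothetical usage sketch (not part of the original source; the class name is
// illustrative): adapts a Callable whose body throws a checked exception into
// an unchecked Supplier, e.g. for use with lambda-based APIs.
class SupplierUtilsUsageSketch {
    public static void main(String[] args) {
        final java.util.function.Supplier<String> supplier = SupplierUtils.wrap(
                // new String(byte[], String) declares the checked UnsupportedEncodingException
                () -> new String(new byte[] { 104, 105 }, "UTF-8"));
        System.out.println(supplier.get()); // prints "hi"; a failure would surface as a RuntimeException
    }
}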
| 789 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/util/CircularIterator.java
|
package com.netflix.evcache.util;
import java.lang.reflect.Array;
import java.util.Collection;
import java.util.Iterator;
/**
* A generic circular iterator. This ensures that all elements receive an
* equal number of requests.
*
* @author smadappa
*/
public class CircularIterator<T> {
private Entry<T> entry;
private int size = 0;
/**
* Creates an instance of ReplicaSetCircularIterator across all ReplicaSets.
*
* @param allReplicaSets
* Set of all available ReplicaSets.
*/
public CircularIterator(Collection<T> allReplicaSets) {
if (allReplicaSets == null || allReplicaSets.isEmpty()) return;
Entry<T> pEntry = null;
for (Iterator<T> itr = allReplicaSets.iterator(); itr.hasNext();) {
size++;
final T rSet = itr.next();
final Entry<T> newEntry = new Entry<T>(rSet, pEntry);
if (entry == null) entry = newEntry;
pEntry = newEntry;
}
/*
* Connect the first and the last entry to form a circular list
*/
if (pEntry != null) {
entry.next = pEntry;
}
}
/**
* Returns the next element which should get the request.
*
* @return - the next element in the iterator. If there
*         are none then null is returned.
*/
public T next() {
if (entry == null) return null;
entry = entry.next;
return entry.element;
}
/**
* Returns the next ReplicaSet excluding the given ReplicaSet which should
* get the request.
*
* @return - the next ReplicaSet in the iterator. If there are none then
* null is returned.
*/
public T next(T ignoreReplicaSet) {
if (entry == null) return null;
entry = entry.next;
if (entry.element.equals(ignoreReplicaSet)) {
return entry.next.element;
} else {
return entry.element;
}
}
public int getSize() {
return size;
}
/**
* The Entry keeps track of the current element and next element in the
* list.
*
* @author smadappa
*
* @param <E>
*/
static class Entry<E> {
private E element;
private Entry<E> next;
/**
* Creates an instance of Entry.
*/
Entry(E element, Entry<E> next) {
this.element = element;
this.next = next;
}
}
public String toString() {
    final StringBuilder current = new StringBuilder();
    if (entry != null) {
        // walk with a local cursor so toString() does not advance the iterator state
        final Entry<T> startEntry = entry;
        Entry<T> cursor = startEntry;
        if (cursor.element.getClass().isArray()) {
            for (int i = 0; i < Array.getLength(cursor.element); i++) {
                if (i > 0) current.append(",");
                current.append("[").append(i).append(", ").append(Array.get(cursor.element, i).toString()).append("]");
            }
        } else {
            current.append(cursor.element);
        }
        while (!cursor.next.equals(startEntry)) {
            if (cursor.next.element.getClass().isArray()) {
                for (int i = 0; i < Array.getLength(cursor.next.element); i++) {
                    if (i > 0) current.append(",");
                    current.append("[").append(i).append(", ").append(Array.get(cursor.next.element, i).toString()).append("]");
                }
            } else {
                current.append(",[").append(cursor.next.element).append("]");
            }
            cursor = cursor.next;
        }
    }
    return "Server Group Iterator : { size=" + getSize() + "; Server Group=" + current.toString() + "}";
}
}
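// Hypothetical usage sketch (not part of the original source; values are
// illustrative): each call to next() advances the ring, visiting every element
// once per cycle (traversal order may differ from insertion order).
class CircularIteratorUsageSketch {
    public static void main(String[] args) {
        final CircularIterator<String> ring =
                new CircularIterator<String>(java.util.Arrays.asList("a", "b", "c"));
        for (int i = 0; i < 6; i++) {
            System.out.print(ring.next() + " "); // cycles through a, b and c twice
        }
        System.out.println();
    }
}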
| 790 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/config/EVCachePersistedProperties.java
|
package com.netflix.evcache.config;
import java.net.URL;
import java.net.URLEncoder;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.config.PollingDynamicConfig;
import com.netflix.archaius.config.polling.FixedPollingStrategy;
import com.netflix.archaius.persisted2.DefaultPersisted2ClientConfig;
import com.netflix.archaius.persisted2.JsonPersistedV2Reader;
import com.netflix.archaius.persisted2.Persisted2ClientConfig;
import com.netflix.archaius.persisted2.ScopePredicates;
import com.netflix.archaius.persisted2.loader.HTTPStreamLoader;
public class EVCachePersistedProperties {
private static Logger log = LoggerFactory.getLogger(EVCachePersistedProperties.class);
private static final String SCOPE_CLUSTER = "cluster";
private static final String SCOPE_AMI = "ami";
private static final String SCOPE_ZONE = "zone";
private static final String SCOPE_ASG = "asg";
private static final String SCOPE_SERVER_ID = "serverId";
private static final String SCOPE_REGION = "region";
private static final String SCOPE_STACK = "stack";
private static final String SCOPE_ENV = "env";
private static final String SCOPE_APP_ID = "appId";
private PollingDynamicConfig config;
public EVCachePersistedProperties() {
}
private Persisted2ClientConfig getConfig() {
final String region = System.getProperty("netflix.region", getSystemEnvValue("NETFLIX_REGION", "us-east-1"));
final String env = System.getProperty("netflix.environment", getSystemEnvValue("NETFLIX_ENVIRONMENT", "test"));
String url = System.getProperty("platformserviceurl", "http://platformservice."+region+".dyn" + env +".netflix.net:7001/platformservice/REST/v2/properties/jsonFilterprops");
return new DefaultPersisted2ClientConfig()
.setEnabled(true)
.withServiceUrl(url)
.withQueryScope(SCOPE_APP_ID, System.getProperty("netflix.appId", getSystemEnvValue("NETFLIX_APP", "")), "")
.withQueryScope(SCOPE_ENV, env, "")
.withQueryScope(SCOPE_STACK, System.getProperty("netflix.stack", getSystemEnvValue("NETFLIX_STACK", "")), "")
.withQueryScope(SCOPE_REGION, region, "")
.withScope(SCOPE_APP_ID, System.getProperty("netflix.appId", getSystemEnvValue("NETFLIX_APP", "")))
.withScope(SCOPE_ENV, env)
.withScope(SCOPE_STACK, System.getProperty("netflix.stack", getSystemEnvValue("NETFLIX_STACK", "")))
.withScope(SCOPE_REGION, region)
.withScope(SCOPE_SERVER_ID, System.getProperty("netflix.serverId", getSystemEnvValue("NETFLIX_INSTANCE_ID", "")))
.withScope(SCOPE_ASG, System.getProperty("netflix.appinfo.asgName", getSystemEnvValue("NETFLIX_AUTO_SCALE_GROUP", "")))
.withScope(SCOPE_ZONE, getSystemEnvValue("EC2_AVAILABILITY_ZONE", ""))
.withScope(SCOPE_AMI, getSystemEnvValue("EC2_AMI_ID", ""))
.withScope(SCOPE_CLUSTER, getSystemEnvValue("NETFLIX_CLUSTER", ""))
.withPrioritizedScopes(SCOPE_SERVER_ID, SCOPE_ASG, SCOPE_AMI, SCOPE_CLUSTER, SCOPE_APP_ID, SCOPE_ENV, SCOPE_STACK, SCOPE_ZONE, SCOPE_REGION)
;
}
private String getSystemEnvValue(String key, String def) {
final String val = System.getenv(key);
return val == null ? def : val;
}
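/*
* Builds the SQL-like filter clause sent to the platform service: scopes are
* AND-ed together and multiple values within a scope are OR-ed. For example
* (illustrative values), {env=[test], region=[us-east-1, us-west-2]} yields:
*   (env='test') and (region='us-east-1' or region='us-west-2')
*/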
private String getFilterString(Map<String, Set<String>> scopes) {
StringBuilder sb = new StringBuilder();
for (Entry<String, Set<String>> scope : scopes.entrySet()) {
if (scope.getValue().isEmpty())
continue;
if (sb.length() > 0) {
sb.append(" and ");
}
sb.append("(");
boolean first = true;
for (String value : scope.getValue()) {
if (!first) {
sb.append(" or ");
}
else {
first = false;
}
sb.append(scope.getKey());
if (null == value) {
sb.append(" is null");
}
else if (value.isEmpty()) {
sb.append("=''");
}
else {
sb.append("='").append(value).append("'");
}
}
sb.append(")");
}
return sb.toString();
}
public PollingDynamicConfig getPollingDynamicConfig() {
try {
Persisted2ClientConfig clientConfig = getConfig();
log.info("Remote config : " + clientConfig);
String url = new StringBuilder()
.append(clientConfig.getServiceUrl())
.append("?skipPropsWithExtraScopes=").append(clientConfig.getSkipPropsWithExtraScopes())
.append("&filter=").append(URLEncoder.encode(getFilterString(clientConfig.getQueryScopes()), "UTF-8"))
.toString();
if (clientConfig.isEnabled()) {
JsonPersistedV2Reader reader = JsonPersistedV2Reader.builder(new HTTPStreamLoader(new URL(url)))
.withPath("propertiesList")
.withScopes(clientConfig.getPrioritizedScopes())
.withPredicate(ScopePredicates.fromMap(clientConfig.getScopes()))
.build();
config = new PollingDynamicConfig(reader, new FixedPollingStrategy(clientConfig.getRefreshRate(), TimeUnit.SECONDS));
return config;
}
} catch (Exception e1) {
throw new RuntimeException(e1);
}
return null;
}
}
| 791 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPool.java
|
package com.netflix.evcache.pool;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.pool.observer.EVCacheConnectionObserver;
import com.netflix.evcache.util.CircularIterator;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.evcache.util.ServerGroupCircularIterator;
import com.netflix.spectator.api.BasicTag;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Tag;
import net.spy.memcached.EVCacheNode;
import net.spy.memcached.MemcachedNode;
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "REC_CATCH_EXCEPTION", "MDM_THREAD_YIELD" })
public class EVCacheClientPool implements Runnable, EVCacheClientPoolMBean {
private static final Logger log = LoggerFactory.getLogger(EVCacheClientPool.class);
private final String _appName;
private final String _zone;
private final EVCacheClientPoolManager manager;
private ServerGroupCircularIterator localServerGroupIterator = null;
private final Property<Boolean> _zoneAffinity;
private final Property<Integer> _poolSize; // Number of MemcachedClients to each cluster
private final Property<Integer> _readTimeout; // Timeout for read operations
private final Property<Integer> _bulkReadTimeout; // Timeout for bulk read operations
public static final String DEFAULT_PORT = "11211";
public static final String DEFAULT_SECURE_PORT = "11443";
private final Property<Boolean> _retryAcrossAllReplicas;
private long lastReconcileTime = 0;
private final Property<Integer> logOperations;
private final Property<Set<String>> logOperationCalls;
private final Property<Set<String>> cloneWrite;
// name of the duet EVCache application, if applicable.
private final Property<String> duet;
// indicates if duet needs to be primary
private final Property<Boolean> duetPrimary;
// evCacheClientPool of the duet EVCache application, if applicable. Supports daisy chaining.
private EVCacheClientPool duetClientPool;
// indicates if this evCacheClientPool is a duet. This property is used to mark EVCacheClients of this pool
// as duet if applicable. The duet property on the EVCacheClient is then used to know what kind of key of
// EVCacheKey (i.e. normal key vs duet key) should be passed to the client
private boolean isDuet;
private final Property<Integer> _opQueueMaxBlockTime; // Timeout for adding an operation
private final Property<Integer> _operationTimeout;// Timeout for write operation
private final Property<Integer> _maxReadQueueSize;
private final Property<Integer> reconcileInterval;
private final Property<Integer> _maxRetries;
private final Property<Boolean> _pingServers;
private final Property<Boolean> refreshConnectionOnReadQueueFull;
private final Property<Integer> refreshConnectionOnReadQueueFullSize;
private final ThreadPoolExecutor asyncRefreshExecutor;
private final Property<Boolean> _disableAsyncRefresh;
private final List<Tag> tagList;
// private final Id poolSizeId;
//private final Map<String, Counter> counterMap = new ConcurrentHashMap<String, Counter>();
private final Map<String, Gauge> gaugeMap = new ConcurrentHashMap<String, Gauge>();
private final ReentrantLock refreshLock = new ReentrantLock();
@SuppressWarnings("serial")
private final Map<ServerGroup, Property<Boolean>> writeOnlyFastPropertyMap = new ConcurrentHashMap<ServerGroup, Property<Boolean>>() {
@Override
public Property<Boolean> get(Object _serverGroup) {
final ServerGroup serverGroup = ServerGroup.class.cast(_serverGroup);
Property<Boolean> isServerGroupInWriteOnlyMode = super.get(serverGroup);
if (isServerGroupInWriteOnlyMode != null) return isServerGroupInWriteOnlyMode;
isServerGroupInWriteOnlyMode = EVCacheConfig.getInstance().
getPropertyRepository().get(_appName + "." + serverGroup.getName() + ".EVCacheClientPool.writeOnly", Boolean.class)
.orElseGet(_appName + "." + serverGroup.getZone() + ".EVCacheClientPool.writeOnly").orElse(false);
put(serverGroup, isServerGroupInWriteOnlyMode);
return isServerGroupInWriteOnlyMode;
};
};
private final AtomicLong numberOfModOps = new AtomicLong(0);
private boolean _shutdown = false;
private Map<ServerGroup, List<EVCacheClient>> memcachedInstancesByServerGroup = new ConcurrentHashMap<ServerGroup, List<EVCacheClient>>();
private Map<ServerGroup, List<EVCacheClient>> memcachedReadInstancesByServerGroup = new ConcurrentHashMap<ServerGroup, List<EVCacheClient>>();
private Map<ServerGroup, List<EVCacheClient>> memcachedWriteInstancesByServerGroup = new ConcurrentSkipListMap<ServerGroup, List<EVCacheClient>>();
private final Map<InetSocketAddress, Long> evCacheDiscoveryConnectionLostSet = new ConcurrentHashMap<InetSocketAddress, Long>();
private Map<String, ServerGroupCircularIterator> readServerGroupByZone = new ConcurrentHashMap<String, ServerGroupCircularIterator>();
private ServerGroupCircularIterator memcachedFallbackReadInstances = new ServerGroupCircularIterator(Collections.<ServerGroup> emptySet());
private CircularIterator<EVCacheClient[]> allEVCacheWriteClients = new CircularIterator<EVCacheClient[]>(Collections.<EVCacheClient[]> emptyList());
private final EVCacheNodeList provider;
EVCacheClientPool(final String appName, final EVCacheNodeList provider, final ThreadPoolExecutor asyncRefreshExecutor, final EVCacheClientPoolManager manager, boolean isDuet) {
this._appName = appName;
this.provider = provider;
this.asyncRefreshExecutor = asyncRefreshExecutor;
this.manager = manager;
this.isDuet = isDuet;
String ec2Zone = System.getenv("EC2_AVAILABILITY_ZONE");
if (ec2Zone == null) ec2Zone = System.getProperty("EC2_AVAILABILITY_ZONE");
this._zone = (ec2Zone == null) ? "GLOBAL" : ec2Zone;
final EVCacheConfig config = EVCacheConfig.getInstance();
final Consumer<Integer> callback = t -> {
clearState();
refreshPool(true, true);
};
this._zoneAffinity = config.getPropertyRepository().get(appName + ".EVCacheClientPool.zoneAffinity", Boolean.class).orElse(true);
this._poolSize = config.getPropertyRepository().get(appName + ".EVCacheClientPool.poolSize", Integer.class).orElse(1);
this._poolSize.subscribe(callback);
this._readTimeout = config.getPropertyRepository().get(appName + ".EVCacheClientPool.readTimeout", Integer.class).orElse(manager.getDefaultReadTimeout().get());
this._readTimeout.subscribe(callback);
this._bulkReadTimeout = config.getPropertyRepository().get(appName + ".EVCacheClientPool.bulkReadTimeout", Integer.class).orElse(_readTimeout.get());
this._bulkReadTimeout.subscribe(callback);
this.refreshConnectionOnReadQueueFull = config.getPropertyRepository().get(appName + ".EVCacheClientPool.refresh.connection.on.readQueueFull", Boolean.class).orElseGet("EVCacheClientPool.refresh.connection.on.readQueueFull").orElse(false);
this.refreshConnectionOnReadQueueFullSize = config.getPropertyRepository().get(appName + ".EVCacheClientPool.refresh.connection.on.readQueueFull.size", Integer.class).orElseGet("EVCacheClientPool.refresh.connection.on.readQueueFull.size").orElse(100);
this._opQueueMaxBlockTime = config.getPropertyRepository().get(appName + ".operation.QueueMaxBlockTime", Integer.class).orElse(10);
this._opQueueMaxBlockTime.subscribe(callback);
this._operationTimeout = config.getPropertyRepository().get(appName + ".operation.timeout", Integer.class).orElseGet("evcache.operation.timeout").orElse(2500);
this._operationTimeout.subscribe(callback);
this._maxReadQueueSize = config.getPropertyRepository().get(appName + ".max.read.queue.length", Integer.class).orElse(50);
this._retryAcrossAllReplicas = config.getPropertyRepository().get(_appName + ".retry.all.copies", Boolean.class).orElse(false);
this._disableAsyncRefresh = config.getPropertyRepository().get(_appName + ".disable.async.refresh", Boolean.class).orElse(false);
this._maxRetries = config.getPropertyRepository().get(_appName + ".max.retry.count", Integer.class).orElse(1);
Function<String, Set<String>> splitSet = t -> Arrays.stream(t.split(",")).collect(Collectors.toSet());
this.logOperations = config.getPropertyRepository().get(appName + ".log.operation", Integer.class).orElse(0);
this.logOperationCalls = config.getPropertyRepository().get(appName + ".log.operation.calls", String.class).orElse("SET,DELETE,GMISS,TMISS,BMISS_ALL,TOUCH,REPLACE").map(splitSet);
this.reconcileInterval = config.getPropertyRepository().get(appName + ".reconcile.interval", Integer.class).orElse(600000);
this.cloneWrite = config.getPropertyRepository().get(appName + ".clone.writes.to", String.class).map(splitSet).orElse(Collections.emptySet());
this.cloneWrite.subscribe(i -> {
setupClones();
});
this.duet = config.getPropertyRepository().get(appName + ".duet", String.class).orElseGet("evcache.duet").orElse("");
this.duet.subscribe(i -> {
setupDuet();
});
this.duetPrimary = config.getPropertyRepository().get(appName + ".duet.primary", Boolean.class).orElseGet("evcache.duet.primary").orElse(false);
tagList = new ArrayList<Tag>(2);
EVCacheMetricsFactory.getInstance().addAppNameTags(tagList, _appName);
this._pingServers = config.getPropertyRepository().get(appName + ".ping.servers", Boolean.class).orElseGet("evcache.ping.servers").orElse(false);
setupMonitoring();
//init all callbacks
refreshPool(false, true);
setupDuet();
setupClones();
if (log.isInfoEnabled()) log.info(toString());
}
private void setupClones() {
for(String cloneApp : cloneWrite.get()) {
manager.initEVCache(cloneApp);
}
}
private void setupDuet() {
// check if duet is already setup, if yes, remove the current duet.
if (duetClientPool != null && !duetClientPool.getAppName().equalsIgnoreCase(duet.get())) {
duetClientPool = null;
log.info("Removed duet");
}
if (null == duetClientPool && !duet.get().isEmpty()) {
duetClientPool = manager.initEVCache(duet.get(), true);
log.info("Completed setup of a duet with name: " + duet.get());
}
}
private void clearState() {
cleanupMemcachedInstances(true);
memcachedInstancesByServerGroup.clear();
memcachedReadInstancesByServerGroup.clear();
memcachedWriteInstancesByServerGroup.clear();
readServerGroupByZone.clear();
memcachedFallbackReadInstances = new ServerGroupCircularIterator(Collections.<ServerGroup> emptySet());
}
private EVCacheClient getEVCacheClientForReadInternal() {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
try {
List<EVCacheClient> clients = null;
if (_zoneAffinity.get()) {
if (localServerGroupIterator != null) {
clients = memcachedReadInstancesByServerGroup.get(localServerGroupIterator.next());
}
if (clients == null) {
final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next();
if (fallbackServerGroup == null) {
if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null.");
return null;
}
clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
}
} else {
clients = new ArrayList<EVCacheClient>(memcachedReadInstancesByServerGroup.size() - 1);
for (Iterator<ServerGroup> itr = memcachedReadInstancesByServerGroup.keySet().iterator(); itr
.hasNext();) {
final ServerGroup serverGroup = itr.next();
final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(serverGroup);
final EVCacheClient client = selectClient(clientList);
if (client != null) clients.add(client);
}
}
return selectClient(clients);
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for zone {}", t);
return null;
}
}
/**
* Returns EVCacheClient of this pool if available. Otherwise, will return EVCacheClient of the duet.
* @return the selected EVCacheClient, or null if none is available
*/
public EVCacheClient getEVCacheClientForRead() {
EVCacheClient evCacheClient = getEVCacheClientForReadInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClient;
}
// return duet if current client is not available or if duet is primary
if (null == evCacheClient || duetPrimary.get()) {
EVCacheClient duetClient = duetClientPool.getEVCacheClientForRead();
// if duetClient is not present, fallback to evCacheClient
return null == duetClient ? evCacheClient : duetClient;
}
return evCacheClient;
}
private List<EVCacheClient> getAllEVCacheClientForReadInternal() {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return Collections.<EVCacheClient> emptyList();
}
try {
List<EVCacheClient> clients = null;
if (localServerGroupIterator != null) {
clients = memcachedReadInstancesByServerGroup.get(localServerGroupIterator.next());
}
if (clients == null) {
final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next();
if (fallbackServerGroup == null) {
if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null.");
return Collections.<EVCacheClient> emptyList();
}
clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
}
return clients;
} catch (Throwable t) {
log.error("Exception trying to get readable EVCache Instances for zone ", t);
return Collections.<EVCacheClient> emptyList();
}
}
public List<EVCacheClient> getAllEVCacheClientForRead() {
List<EVCacheClient> evCacheClients = getAllEVCacheClientForReadInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
List<EVCacheClient> duetEVCacheClients = duetClientPool.getAllEVCacheClientForRead();
if (null == evCacheClients)
return duetEVCacheClients;
if (null == duetEVCacheClients)
return evCacheClients;
if (duetPrimary.get()) {
List<EVCacheClient> clients = new ArrayList<>(duetEVCacheClients);
clients.addAll(evCacheClients);
return clients;
} else {
List<EVCacheClient> clients = new ArrayList<>(evCacheClients);
clients.addAll(duetEVCacheClients);
return clients;
}
}
private EVCacheClient selectClient(List<EVCacheClient> clients) {
if (clients == null || clients.isEmpty()) {
if (log.isDebugEnabled()) log.debug("clients is null returning null and forcing pool refresh!!!");
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
if (clients.size() == 1) {
return clients.get(0); // Frequently used scenario
}
final long currentVal = numberOfModOps.incrementAndGet();
// Get absolute value of current val to ensure correctness even at 9 quintillion+ requests
// make sure to truncate after the mod. This allows up to 2^31 clients.
final int index = Math.abs((int) (currentVal % clients.size()));
return clients.get(index);
}
private EVCacheClient getEVCacheClientForReadExcludeInternal(ServerGroup rsetUsed) {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
try {
ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(rsetUsed);
if (fallbackServerGroup == null || fallbackServerGroup.equals(rsetUsed)) {
return null;
}
final List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
return selectClient(clients);
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for zone {}", rsetUsed, t);
return null;
}
}
public EVCacheClient getEVCacheClientForReadExclude(ServerGroup rsetUsed) {
EVCacheClient evCacheClient = getEVCacheClientForReadExcludeInternal(rsetUsed);
// most common production scenario
if (null == duetClientPool) {
return evCacheClient;
}
// return duet if current client is not available or if duet is primary
if (null == evCacheClient || duetPrimary.get()) {
EVCacheClient duetClient = duetClientPool.getEVCacheClientForReadExclude(rsetUsed);
// if duetClient is not present, fallback to evCacheClient
return null == duetClient ? evCacheClient : duetClient;
}
return evCacheClient;
}
private EVCacheClient getEVCacheClientInternal(ServerGroup serverGroup) {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return null;
}
try {
List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(serverGroup);
if (clients == null) {
final ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next();
if (fallbackServerGroup == null) {
if (log.isDebugEnabled()) log.debug("fallbackServerGroup is null.");
return null;
}
clients = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
}
return selectClient(clients);
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for ServerGroup {}", serverGroup, t);
return null;
}
}
public EVCacheClient getEVCacheClient(ServerGroup serverGroup) {
EVCacheClient evCacheClient = getEVCacheClientInternal(serverGroup);
// most common production scenario
if (null == duetClientPool) {
return evCacheClient;
}
// return duet if current client is not available or if duet is primary
if (null == evCacheClient || duetPrimary.get()) {
EVCacheClient duetClient = duetClientPool.getEVCacheClient(serverGroup);
// if duetClient is not present, fallback to evCacheClient
return null == duetClient ? evCacheClient : duetClient;
}
return evCacheClient;
}
private List<EVCacheClient> getEVCacheClientsForReadExcludingInternal(ServerGroup serverGroupToExclude) {
if (memcachedReadInstancesByServerGroup == null || memcachedReadInstancesByServerGroup.isEmpty()) {
if (log.isDebugEnabled()) log.debug("memcachedReadInstancesByServerGroup : " + memcachedReadInstancesByServerGroup);
if(asyncRefreshExecutor.getQueue().isEmpty()) refreshPool(true, true);
return Collections.<EVCacheClient> emptyList();
}
try {
if (_retryAcrossAllReplicas.get()) {
List<EVCacheClient> clients = new ArrayList<EVCacheClient>(memcachedReadInstancesByServerGroup.size() - 1);
for (Iterator<ServerGroup> itr = memcachedReadInstancesByServerGroup.keySet().iterator(); itr
.hasNext();) {
final ServerGroup serverGroup = itr.next();
if (serverGroup.equals(serverGroupToExclude)) continue;
final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(serverGroup);
final EVCacheClient client = selectClient(clientList);
if (client != null) clients.add(client);
}
return clients;
} else {
if(_maxRetries.get() == 1) {
final EVCacheClient client = getEVCacheClientForReadExclude(serverGroupToExclude);
if (client != null) return Collections.singletonList(client);
} else {
int maxNumberOfPossibleRetries = memcachedReadInstancesByServerGroup.size() - 1;
if(maxNumberOfPossibleRetries > _maxRetries.get()) {
maxNumberOfPossibleRetries = _maxRetries.get();
}
final List<EVCacheClient> clients = new ArrayList<EVCacheClient>(_maxRetries.get());
for(int i = 0; i < maxNumberOfPossibleRetries; i++) {
ServerGroup fallbackServerGroup = memcachedFallbackReadInstances.next(serverGroupToExclude);
if (fallbackServerGroup == null ) {
return clients;
}
final List<EVCacheClient> clientList = memcachedReadInstancesByServerGroup.get(fallbackServerGroup);
final EVCacheClient client = selectClient(clientList);
if (client != null) clients.add(client);
}
return clients;
}
}
} catch (Throwable t) {
log.error("Exception trying to get an readable EVCache Instances for zone {}", serverGroupToExclude, t);
}
return Collections.<EVCacheClient> emptyList();
}
public List<EVCacheClient> getEVCacheClientsForReadExcluding(ServerGroup serverGroupToExclude) {
List<EVCacheClient> evCacheClients = getEVCacheClientsForReadExcludingInternal(serverGroupToExclude);
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
List<EVCacheClient> duetEVCacheClients = duetClientPool.getEVCacheClientsForReadExcluding(serverGroupToExclude);
if (null == evCacheClients)
return duetEVCacheClients;
if (null == duetEVCacheClients)
return evCacheClients;
if (duetPrimary.get()) {
List<EVCacheClient> clients = new ArrayList<>(duetEVCacheClients);
clients.addAll(evCacheClients);
return clients;
} else {
List<EVCacheClient> clients = new ArrayList<>(evCacheClients);
clients.addAll(duetEVCacheClients);
return clients;
}
}
public boolean isInWriteOnly(ServerGroup serverGroup) {
if (memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
return false;
}
if(memcachedWriteInstancesByServerGroup.containsKey(serverGroup)) {
return true;
}
return false;
}
private EVCacheClient[] getWriteOnlyEVCacheClientsInternal() {
try {
if((cloneWrite.get().size() == 0)) {
int size = memcachedWriteInstancesByServerGroup.size() - memcachedReadInstancesByServerGroup.size();
if (size == 0) return new EVCacheClient[0];
final EVCacheClient[] clientArr = new EVCacheClient[size];
for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup) && size > 0) {
final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
if (clients.size() == 1) {
clientArr[--size] = clients.get(0); // frequently used use case
} else {
final long currentVal = numberOfModOps.incrementAndGet();
final int index = (int) (currentVal % clients.size());
clientArr[--size] = (index < 0) ? clients.get(0) : clients.get(index);
}
}
}
return clientArr;
} else {
final List<EVCacheClient> evcacheClientList = new ArrayList<EVCacheClient>();
for(String cloneApp : cloneWrite.get()) {
final EVCacheClient[] clients = manager.getEVCacheClientPool(cloneApp).getWriteOnlyEVCacheClients();
if(clients == null || clients.length == 0) continue;
for(int i = 0; i < clients.length; i++) {
evcacheClientList.add(clients[i]);
}
}
for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
if (clients.size() == 1) {
evcacheClientList.add(clients.get(0)); // frequently used use case
} else {
final long currentVal = numberOfModOps.incrementAndGet();
final int index = (int) (currentVal % clients.size());
evcacheClientList.add((index < 0) ? clients.get(0) : clients.get(index));
}
}
}
return evcacheClientList.toArray(new EVCacheClient[0]);
}
} catch (Throwable t) {
log.error("Exception trying to get an array of writable EVCache Instances", t);
return new EVCacheClient[0];
}
}
public EVCacheClient[] getWriteOnlyEVCacheClients() {
EVCacheClient[] evCacheClients = getWriteOnlyEVCacheClientsInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
EVCacheClient[] duetEVCacheClients = duetClientPool.getWriteOnlyEVCacheClients();
if (null == evCacheClients || evCacheClients.length == 0) {
return duetEVCacheClients;
}
if (null == duetEVCacheClients || duetEVCacheClients.length == 0) {
return evCacheClients;
}
if (duetPrimary.get()) {
// return write-only of duet app and all writers of original app to which duet is attached
// get all writers of original app
evCacheClients = getEVCacheClientForWriteInternal();
EVCacheClient[] allEVCacheClients = Arrays.copyOf(duetEVCacheClients, duetEVCacheClients.length + evCacheClients.length);
System.arraycopy(evCacheClients, 0, allEVCacheClients, duetEVCacheClients.length, evCacheClients.length);
return allEVCacheClients;
} else {
// return write-only of original app and all writers of duet app
// get all writers of duet app
duetEVCacheClients = duetClientPool.getEVCacheClientForWrite();
EVCacheClient[] allEVCacheClients = Arrays.copyOf(evCacheClients, evCacheClients.length + duetEVCacheClients.length);
System.arraycopy(duetEVCacheClients, 0, allEVCacheClients, evCacheClients.length, duetEVCacheClients.length);
return allEVCacheClients;
}
}
EVCacheClient[] getAllWriteClients() {
try {
if(allEVCacheWriteClients != null) {
EVCacheClient[] clientArray = allEVCacheWriteClients.next();
if(clientArray == null || clientArray.length == 0 ) {
if (log.isInfoEnabled()) log.info("Refreshing the write client array.");
try {
refreshLock.lock();
clientArray = allEVCacheWriteClients.next();
if(clientArray == null || clientArray.length == 0 ) {
refreshPool(false, true);
clientArray = allEVCacheWriteClients.next();
}
}
finally {
refreshLock.unlock();
}
}
if (log.isDebugEnabled()) log.debug("clientArray : " + clientArray);
if(clientArray == null ) return new EVCacheClient[0];
return clientArray;
}
final EVCacheClient[] clientArr = new EVCacheClient[memcachedWriteInstancesByServerGroup.size()];
int i = 0;
for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
if (clients.size() == 1) {
clientArr[i++] = clients.get(0); // frequently used use case
} else {
final long currentVal = numberOfModOps.incrementAndGet();
final int index = (int) (currentVal % clients.size());
clientArr[i++] = (index < 0) ? clients.get(0) : clients.get(index);
}
}
if(clientArr == null ) return new EVCacheClient[0];
return clientArr;
} catch (Throwable t) {
log.error("Exception trying to get an array of writable EVCache Instances", t);
return new EVCacheClient[0];
}
}
private EVCacheClient[] getEVCacheClientForWriteInternal() {
try {
if((cloneWrite.get().size() == 0)) {
return getAllWriteClients();
} else {
final List<EVCacheClient> evcacheClientList = new ArrayList<EVCacheClient>();
final EVCacheClient[] clientArr = getAllWriteClients();
for(EVCacheClient client : clientArr) {
evcacheClientList.add(client);
}
for(String cloneApp : cloneWrite.get()) {
final EVCacheClient[] cloneWriteArray = manager.getEVCacheClientPool(cloneApp).getAllWriteClients();
for(int j = 0; j < cloneWriteArray.length; j++) {
evcacheClientList.add(cloneWriteArray[j]);
}
}
return evcacheClientList.toArray(new EVCacheClient[0]);
}
} catch (Throwable t) {
log.error("Exception trying to get an array of writable EVCache Instances", t);
return new EVCacheClient[0];
}
}
public EVCacheClient[] getEVCacheClientForWrite() {
EVCacheClient[] evCacheClients = getEVCacheClientForWriteInternal();
// most common production scenario
if (null == duetClientPool) {
return evCacheClients;
}
EVCacheClient[] duetEVCacheClients = duetClientPool.getEVCacheClientForWrite();
if (null == evCacheClients || evCacheClients.length == 0) {
return duetEVCacheClients;
}
if (null == duetEVCacheClients || duetEVCacheClients.length == 0) {
return evCacheClients;
}
if (duetPrimary.get()) {
EVCacheClient[] allEVCacheClients = Arrays.copyOf(duetEVCacheClients, duetEVCacheClients.length + evCacheClients.length);
System.arraycopy(evCacheClients, 0, allEVCacheClients, duetEVCacheClients.length, evCacheClients.length);
return allEVCacheClients;
} else {
EVCacheClient[] allEVCacheClients = Arrays.copyOf(evCacheClients, evCacheClients.length + duetEVCacheClients.length);
System.arraycopy(duetEVCacheClients, 0, allEVCacheClients, evCacheClients.length, duetEVCacheClients.length);
return allEVCacheClients;
}
}
private void refresh() throws IOException {
refresh(false);
}
protected boolean haveInstancesInServerGroupChanged(ServerGroup serverGroup, Set<InetSocketAddress> discoveredHostsInServerGroup) {
final List<EVCacheClient> clients = memcachedInstancesByServerGroup.get(serverGroup);
// 1. if we have discovered instances in zone but not in our map then
// return immediately
if (clients == null) return true;
// 2. Do a quick check based on count (active, inactive and discovered)
for (int i = 0; i < clients.size(); i++) {
final int size = clients.size();
final EVCacheClient client = clients.get(i);
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
final int activeServerCount = connectionObserver.getActiveServerCount();
final int inActiveServerCount = connectionObserver.getInActiveServerCount();
final int sizeInDiscovery = discoveredHostsInServerGroup.size();
final int sizeInHashing = client.getNodeLocator().getAll().size();
if (i == 0) getConfigGauge("sizeInDiscovery", serverGroup).set(Long.valueOf(sizeInDiscovery));
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + "\n\tActive Count : " + activeServerCount
+ "\n\tInactive Count : " + inActiveServerCount + "\n\tDiscovery Count : " + sizeInDiscovery + "\n\tsizeInHashing : " + sizeInHashing);
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tActive Count : " + activeServerCount + "\n\tInactive Count : "
+ inActiveServerCount + "\n\tDiscovery Count : " + sizeInDiscovery + "\n\tsizeInHashing : " + sizeInHashing);
final long currentTime = System.currentTimeMillis();
boolean reconcile = false;
if (currentTime - lastReconcileTime > reconcileInterval.get()) {
reconcile = true;
lastReconcileTime = currentTime;
getConfigGauge(EVCacheMetricsFactory.POOL_RECONCILE, serverGroup).set(Long.valueOf(1));
} else {
getConfigGauge(EVCacheMetricsFactory.POOL_RECONCILE, serverGroup).set(Long.valueOf(0));
}
final boolean hashingSizeDiff = (sizeInHashing != sizeInDiscovery && sizeInHashing != activeServerCount);
if (reconcile || activeServerCount != sizeInDiscovery || inActiveServerCount > 0 || hashingSizeDiff) {
if (log.isDebugEnabled()) log.debug("\n\t" + _appName + " & " + serverGroup
+ " experienced an issue.\n\tActive Server Count : " + activeServerCount);
if (log.isDebugEnabled()) log.debug("\n\tInActive Server Count : " + inActiveServerCount
+ "\n\tDiscovered Instances : " + sizeInDiscovery);
// 1. If a host is in discovery and we don't have an active or
// inActive connection to it then we will have to refresh our
// list. Typical case is we have replaced an existing node or
// expanded the cluster.
for (InetSocketAddress instance : discoveredHostsInServerGroup) {
if (!connectionObserver.getActiveServers().containsKey(instance) && !connectionObserver.getInActiveServers().containsKey(instance)) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance
+ " not found and will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(1));
return true;
}
}
// 2. If a host is not in discovery and has been
// inactive for more than 20 mins (1200000 ms, matching the check below) then
// we will have to refresh our list. Typical case is we have replaced an
// existing node or shrunk the cluster. Replacing an instance should not take
// more than 20 mins (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html#types-of-instance-status-checks).
// Even if it does then we will refresh the client twice, which
// should be ok.
// NOTE : For a zombie instance this will mean that it will take
// 20 mins after detaching and taking it OOS to be removed
// unless we force a refresh.
// 12/5/2015 - Should we even do this anymore
for (Entry<InetSocketAddress, Long> entry : connectionObserver.getInActiveServers().entrySet()) {
if ((currentTime - entry.getValue().longValue()) > 1200000 && !discoveredHostsInServerGroup.contains(entry.getKey())) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; instance : " + entry.getKey()
+ " not found in discovery and will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(2));
return true;
}
}
// 3. Check to see if there are any inactive connections. If we
// find inactive connections and this node is not in discovery
// then we will refresh the client.
final Collection<MemcachedNode> allNodes = client.getNodeLocator().getAll();
for (MemcachedNode node : allNodes) {
if (node instanceof EVCacheNode) {
final EVCacheNode evcNode = ((EVCacheNode) node);
// If the connection to a node is not active then we
// will reconnect the client.
if (!evcNode.isActive() && !discoveredHostsInServerGroup.contains(evcNode.getSocketAddress())) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; Node : " + node + " is not active. Will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(3));
return true;
}
}
}
// 4. if there is a difference in the number of nodes in the
// KetamaHashingMap then refresh
if (hashingSizeDiff) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; PoolSize : " + size + "; ActiveConnections : " + activeServerCount
+ "; InactiveConnections : " + inActiveServerCount + "; InDiscovery : " + sizeInDiscovery
+ "; InHashing : " + sizeInHashing + "; hashingSizeDiff : " + hashingSizeDiff
+ ". Since there is a diff in hashing size will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(4));
return true;
}
// 5. If a host is in not discovery and we have an active connection to it for more than 20 mins then we will refresh
// Typical case is we have replaced an existing node but it has zombie. We are able to connect to it (hypervisor) but not talk to it
// or prana has shutdown successfully but not memcached. In such scenario we will refresh the cluster
for(InetSocketAddress instance : connectionObserver.getActiveServers().keySet()) {
if(!discoveredHostsInServerGroup.contains(instance)) {
if(!evCacheDiscoveryConnectionLostSet.containsKey(instance)) {
evCacheDiscoveryConnectionLostSet.put(instance, Long.valueOf(currentTime));
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance + " not found in discovery. We will add to our list and monitor it.");
} else {
long lostDur = (currentTime - evCacheDiscoveryConnectionLostSet.get(instance).longValue());
if (lostDur >= 1200000) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance + " not found in discovery for the past 20 mins and will shutdown the client and init it again.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(5));
evCacheDiscoveryConnectionLostSet.remove(instance);
return true;
} else {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; instance : " + instance + " not found in discovery for " + lostDur + " msec.");
}
}
}
}
// 9. If we have removed all instances or took them OOS in a
// ServerGroup then shutdown the client
if (sizeInDiscovery == 0) {
if (activeServerCount == 0 || inActiveServerCount > activeServerCount) {
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; Will shutdown the client since there are no active servers and no servers for this ServerGroup in disocvery.");
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(9));
return true;
}
}
}
getConfigGauge(EVCacheMetricsFactory.POOL_CHANGED, serverGroup).set(Long.valueOf(0));
}
reportPoolConfig();
return false;
}
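// Quick reference for the POOL_CHANGED gauge values reported above:
// 1 - a discovered host has no active or inactive connection yet
// 2 - a host has been inactive for over 20 mins and is missing from discovery
// 3 - a node connection is inactive and the host is missing from discovery
// 4 - the hashing ring size differs from the discovery/active counts
// 5 - an active connection's host has been missing from discovery for 20 mins
// 9 - nothing in discovery and no usable active connections remain
// 0 - no change detected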
private List<InetSocketAddress> getMemcachedSocketAddressList(final Set<InetSocketAddress> discoveredHostsInZone) {
final List<InetSocketAddress> memcachedNodesInZone = new ArrayList<InetSocketAddress>();
for (InetSocketAddress hostAddress : discoveredHostsInZone) {
memcachedNodesInZone.add(hostAddress);
}
return memcachedNodesInZone;
}
private void shutdownClientsInZone(List<EVCacheClient> clients) {
if (clients == null || clients.isEmpty()) return;
// Shutdown the old clients in 60 seconds, this will give ample time to
// cleanup anything pending in its queue
for (EVCacheClient oldClient : clients) {
try {
final boolean obsRemoved = oldClient.removeConnectionObserver();
if (log.isDebugEnabled()) log.debug("Connection observer removed " + obsRemoved);
final boolean status = oldClient.shutdown(60, TimeUnit.SECONDS);
if (log.isDebugEnabled()) log.debug("Shutting down -> Client {" + oldClient.toString() + "}; status : "
+ status);
} catch (Exception ex) {
log.error("Exception while shutting down the old Client", ex);
}
}
}
private void setupNewClientsByServerGroup(ServerGroup serverGroup, List<EVCacheClient> newClients) {
final List<EVCacheClient> currentClients = memcachedInstancesByServerGroup.put(serverGroup, newClients);
// if the zone is in write only mode then remove it from the Map
final Property<Boolean> isZoneInWriteOnlyMode = writeOnlyFastPropertyMap.get(serverGroup);
if (isZoneInWriteOnlyMode.get().booleanValue()) {
memcachedReadInstancesByServerGroup.remove(serverGroup);
} else {
memcachedReadInstancesByServerGroup.put(serverGroup, newClients);
}
memcachedWriteInstancesByServerGroup.put(serverGroup, newClients);
setupAllEVCacheWriteClientsArray();
if (currentClients == null || currentClients.isEmpty()) return;
// Now that we have replaced the old instances, shut down all the
// old clients
if (log.isDebugEnabled()) log.debug("Replaced an existing Pool for ServerGroup : " + serverGroup + "; and app "
+ _appName + " ;\n\tOldClients : " + currentClients + ";\n\tNewClients : " + newClients);
for (EVCacheClient client : currentClients) {
if (!client.isShutdown()) {
if (log.isDebugEnabled()) log.debug("Shutting down in Fallback -> AppName : " + _appName
+ "; ServerGroup : " + serverGroup + "; client {" + client + "};");
try {
if (client.getConnectionObserver() != null) {
final boolean obsRemoved = client.removeConnectionObserver();
if (log.isDebugEnabled()) log.debug("Connection observer removed " + obsRemoved);
}
final boolean status = client.shutdown(5, TimeUnit.SECONDS);
if (log.isDebugEnabled()) log.debug("Shutting down {" + client + "} ; status : " + status);
} catch (Exception ex) {
log.error("Exception while shutting down the old Client", ex);
}
}
}
// Being paranoid here: even though we have already shut down the old clients,
// do it again, as we have noticed issues while shutting down MemcachedNodes
shutdownClientsInZone(currentClients);
}
// Check if a zone has been moved to Write only. If so, remove the app from
// the read map.
// Similarly if the app has been moved to Read+Write from write only add it
// back to the read map.
private void updateMemcachedReadInstancesByZone() {
for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
final Property<Boolean> isZoneInWriteOnlyMode = writeOnlyFastPropertyMap.get(serverGroup);
if (isZoneInWriteOnlyMode.get().booleanValue()) {
if (memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
memcachedReadInstancesByServerGroup.remove(serverGroup);
}
} else {
if (!memcachedReadInstancesByServerGroup.containsKey(serverGroup)) {
memcachedReadInstancesByServerGroup.put(serverGroup, memcachedInstancesByServerGroup.get(serverGroup));
}
}
// if we lose over 50% of instances put that zone in writeonly mode.
final List<EVCacheClient> clients = memcachedReadInstancesByServerGroup.get(serverGroup);
if (clients != null && !clients.isEmpty()) {
final EVCacheClient client = clients.get(0);
if (client != null) {
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
if (connectionObserver != null) {
final int activeServerCount = connectionObserver.getActiveServerCount();
final int inActiveServerCount = connectionObserver.getInActiveServerCount();
if (inActiveServerCount > activeServerCount) {
memcachedReadInstancesByServerGroup.remove(serverGroup);
getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(1));
} else {
getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(2));
}
}
}
} else {
final List<EVCacheClient> clientsWrite = memcachedInstancesByServerGroup.get(serverGroup);
if (clientsWrite != null && !clientsWrite.isEmpty()) {
getConfigGauge(EVCacheMetricsFactory.POOL_SERVERGROUP_STATUS, serverGroup).set(Long.valueOf(0));
}
}
}
if (memcachedReadInstancesByServerGroup.size() != memcachedFallbackReadInstances.getSize()) {
memcachedFallbackReadInstances = new ServerGroupCircularIterator(memcachedReadInstancesByServerGroup.keySet());
Map<String, Set<ServerGroup>> readServerGroupByZoneMap = new ConcurrentHashMap<String, Set<ServerGroup>>();
for (ServerGroup serverGroup : memcachedReadInstancesByServerGroup.keySet()) {
Set<ServerGroup> serverGroupList = readServerGroupByZoneMap.get(serverGroup.getZone());
if (serverGroupList == null) {
serverGroupList = new HashSet<ServerGroup>();
readServerGroupByZoneMap.put(serverGroup.getZone(), serverGroupList);
}
serverGroupList.add(serverGroup);
}
Map<String, ServerGroupCircularIterator> _readServerGroupByZone = new ConcurrentHashMap<String, ServerGroupCircularIterator>();
for (Entry<String, Set<ServerGroup>> readServerGroupByZoneEntry : readServerGroupByZoneMap.entrySet()) {
_readServerGroupByZone.put(readServerGroupByZoneEntry.getKey(), new ServerGroupCircularIterator(readServerGroupByZoneEntry.getValue()));
}
this.readServerGroupByZone = _readServerGroupByZone;
localServerGroupIterator = readServerGroupByZone.get(_zone);
}
}
private void cleanupMemcachedInstances(boolean force) {
pingServers();
for (Iterator<Entry<ServerGroup, List<EVCacheClient>>> it = memcachedInstancesByServerGroup.entrySet().iterator(); it.hasNext();) {
final Entry<ServerGroup, List<EVCacheClient>> serverGroupEntry = it.next();
final List<EVCacheClient> instancesInAServerGroup = serverGroupEntry.getValue();
boolean removeEntry = false;
for (EVCacheClient client : instancesInAServerGroup) {
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
if (connectionObserver.getActiveServerCount() == 0 && connectionObserver.getInActiveServerCount() > 0) {
removeEntry = true;
}
}
if (force || removeEntry) {
final ServerGroup serverGroup = serverGroupEntry.getKey();
memcachedReadInstancesByServerGroup.remove(serverGroup);
memcachedWriteInstancesByServerGroup.remove(serverGroup);
for (EVCacheClient client : instancesInAServerGroup) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup + " has no active servers. Cleaning up this ServerGroup.");
client.shutdown(0, TimeUnit.SECONDS);
client.getConnectionObserver().shutdown();
}
it.remove();
allEVCacheWriteClients = null;
}
}
}
private synchronized void refresh(boolean force) throws IOException {
final long start = System.currentTimeMillis();
if (log.isDebugEnabled()) log.debug("refresh APP : " + _appName + "; force : " + force);
try {
final Map<ServerGroup, EVCacheServerGroupConfig> instances = provider.discoverInstances(_appName);
if (log.isDebugEnabled()) log.debug("instances : " + instances);
// if no instances are found check to see if a clean up is needed
// and bail immediately.
if (instances == null || instances.isEmpty()) {
if (!memcachedInstancesByServerGroup.isEmpty()) cleanupMemcachedInstances(false);
return;
}
for(ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
if(!instances.containsKey(serverGroup)) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ " does not exist or is not enabled or is out of service. We will shutdown this client and remove it.");
serverGroupDisabled(serverGroup);
}
}
boolean updateAllEVCacheWriteClients = false;
for (Entry<ServerGroup, EVCacheServerGroupConfig> serverGroupEntry : instances.entrySet()) {
final ServerGroup serverGroup = serverGroupEntry.getKey();
final EVCacheServerGroupConfig config = serverGroupEntry.getValue();
final Set<InetSocketAddress> discoveredInstancesInServerGroup = config.getInetSocketAddress();
final String zone = serverGroup.getZone();
final Set<InetSocketAddress> discoveredHostsInServerGroup = (discoveredInstancesInServerGroup == null)
? Collections.<InetSocketAddress> emptySet() : discoveredInstancesInServerGroup;
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tSize : " + discoveredHostsInServerGroup.size()
+ "\n\tInstances in ServerGroup : " + discoveredHostsInServerGroup);
if (discoveredHostsInServerGroup.size() == 0 && memcachedInstancesByServerGroup.containsKey(serverGroup)) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ " has no active servers. Cleaning up this ServerGroup.");
serverGroupDisabled(serverGroup);
continue;
}
boolean instanceChangeInServerGroup = force;
if (instanceChangeInServerGroup) {
if (log.isWarnEnabled()) log.warn("FORCE REFRESH :: AppName :" + _appName + "; ServerGroup : "
+ serverGroup + "; Changed : " + instanceChangeInServerGroup);
} else {
instanceChangeInServerGroup = haveInstancesInServerGroupChanged(serverGroup, discoveredHostsInServerGroup);
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tinstanceChangeInServerGroup : " + instanceChangeInServerGroup);
if (!instanceChangeInServerGroup) {
// Quick exit as everything looks fine: no new instances were
// found and none went inactive
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup
+ "; Changed : " + instanceChangeInServerGroup);
continue;
}
}
// Let us create a list of SocketAddress from the discovered
// instances in zone
final List<InetSocketAddress> memcachedSAInServerGroup = getMemcachedSocketAddressList(discoveredHostsInServerGroup);
if (memcachedSAInServerGroup.size() > 0) {
// Since the instances in the zone have changed, go ahead and
// create new EVCacheClients with the new settings
final int poolSize = _poolSize.get();
final List<EVCacheClient> newClients = new ArrayList<EVCacheClient>(poolSize);
for (int i = 0; i < poolSize; i++) {
final int maxQueueSize = EVCacheConfig.getInstance().getPropertyRepository().get(_appName + ".max.queue.length", Integer.class).orElse(16384).get();
EVCacheClient client;
try {
client = new EVCacheClient(_appName, zone, i, config, memcachedSAInServerGroup, maxQueueSize,
_maxReadQueueSize, _readTimeout, _bulkReadTimeout, _opQueueMaxBlockTime, _operationTimeout, this, isDuet);
newClients.add(client);
final int id = client.getId();
if (log.isDebugEnabled()) log.debug("AppName :" + _appName + "; ServerGroup : " + serverGroup + "; intit : client.getId() : " + id);
lastReconcileTime = System.currentTimeMillis();
} catch (Exception e) {
incrementFailure(EVCacheMetricsFactory.INTERNAL_POOL_INIT_ERROR, config.getServerGroup());
log.error("Unable to create EVCacheClient for app - " + _appName + " and Server Group - " + serverGroup.getName(), e);
}
}
if (newClients.size() > 0) {
setupNewClientsByServerGroup(serverGroup, newClients);
updateAllEVCacheWriteClients = true;
}
}
}
if(updateAllEVCacheWriteClients) {
setupAllEVCacheWriteClientsArray();
}
// Check to see if a zone has been removed, if so remove them from
// the active list
if (memcachedInstancesByServerGroup.size() > instances.size()) {
if (log.isDebugEnabled()) log.debug("\n\tAppName :" + _appName + ";\n\tServerGroup Discovered : " + instances.keySet()
+ ";\n\tCurrent ServerGroup in EVCache Client : " + memcachedInstancesByServerGroup.keySet());
cleanupMemcachedInstances(false);
}
updateMemcachedReadInstancesByZone();
updateQueueStats();
if (_pingServers.get()) pingServers();
} catch (Throwable t) {
log.error("Exception while refreshing the Server list", t);
} finally {
EVCacheMetricsFactory.getInstance().getPercentileTimer(EVCacheMetricsFactory.INTERNAL_POOL_REFRESH, tagList, Duration.ofMillis(100)).record(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
}
if (log.isDebugEnabled()) log.debug("refresh APP : " + _appName + "; DONE");
}
private void setupAllEVCacheWriteClientsArray() {
final List<EVCacheClient[]> newClients = new ArrayList<EVCacheClient[]>(_poolSize.get());
try {
final int serverGroupSize = memcachedWriteInstancesByServerGroup.size();
for(int ind = 0; ind < _poolSize.get(); ind++) {
final EVCacheClient[] clientArr = new EVCacheClient[serverGroupSize];
int i = 0;
for (ServerGroup serverGroup : memcachedWriteInstancesByServerGroup.keySet()) {
final List<EVCacheClient> clients = memcachedWriteInstancesByServerGroup.get(serverGroup);
if(clients.size() > ind) {
clientArr[i++] = clients.get(ind); // the common case
} else {
log.warn("Incorrect pool size detected for AppName : " + _appName + "; PoolSize " + _poolSize.get() + "; serverGroup : " + serverGroup + "; ind : " + ind + "; i : " + i);
if(clients.size() > 0) {
clientArr[i++] = clients.get(0);
}
}
}
newClients.add(clientArr);
}
this.allEVCacheWriteClients = new CircularIterator<EVCacheClient[]>(newClients);
} catch (Throwable t) {
log.error("Exception trying to create an array of writable EVCache Instances for App : " + _appName, t);
}
}
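// Note: the method above builds one EVCacheClient[] per pool index; the array
// for index ind holds, for every server group, that group's ind-th client. The
// circular iterator then hands a complete cross-server-group array to each
// write, striping connections evenly across the pool.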
private void updateQueueStats() {
for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
List<EVCacheClient> clients = memcachedInstancesByServerGroup.get(serverGroup);
for(EVCacheClient client : clients) {
getStatsGauge(EVCacheMetricsFactory.POOL_WRITE_Q_SIZE, client).set(Long.valueOf(client.getWriteQueueLength()));
getStatsGauge(EVCacheMetricsFactory.POOL_READ_Q_SIZE, client).set(Long.valueOf(client.getReadQueueLength()));
if(refreshConnectionOnReadQueueFull.get()) {
final Collection<MemcachedNode> allNodes = client.getNodeLocator().getAll();
for (MemcachedNode node : allNodes) {
if (node instanceof EVCacheNode) {
final EVCacheNode evcNode = ((EVCacheNode) node);
if(evcNode.getReadQueueSize() >= refreshConnectionOnReadQueueFullSize.get().intValue()) {
EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.POOL_REFRESH_QUEUE_FULL, evcNode.getTags()).increment();
client.getEVCacheMemcachedClient().reconnectNode(evcNode);
}
}
}
}
}
}
}
public void pingServers() {
try {
final Map<ServerGroup, List<EVCacheClient>> allServers = getAllInstancesByZone();
for (Entry<ServerGroup, List<EVCacheClient>> entry : allServers.entrySet()) {
final List<EVCacheClient> listOfClients = entry.getValue();
for (EVCacheClient client : listOfClients) {
final Map<SocketAddress, String> versions = client.getVersions();
for (Entry<SocketAddress, String> vEntry : versions.entrySet()) {
if (log.isDebugEnabled()) log.debug("Host : " + vEntry.getKey() + " : " + vEntry.getValue());
}
}
}
if (duetClientPool != null)
duetClientPool.pingServers();
} catch (Throwable t) {
log.error("Error while pinging the servers", t);
}
}
public void serverGroupDisabled(final ServerGroup serverGroup) {
if (memcachedInstancesByServerGroup.containsKey(serverGroup)) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ " has no active servers. Cleaning up this ServerGroup.");
final List<EVCacheClient> clients = memcachedInstancesByServerGroup.remove(serverGroup);
memcachedReadInstancesByServerGroup.remove(serverGroup);
memcachedWriteInstancesByServerGroup.remove(serverGroup);
setupAllEVCacheWriteClientsArray();
for (EVCacheClient client : clients) {
if (log.isDebugEnabled()) log.debug("\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup
+ "\n\tClient : " + client + " will be shutdown in 30 seconds.");
client.shutdown(30, TimeUnit.SECONDS);
client.getConnectionObserver().shutdown();
}
}
if (duetClientPool != null)
duetClientPool.serverGroupDisabled(serverGroup);
}
public void refreshAsync(MemcachedNode node) {
if (log.isInfoEnabled()) log.info("Pool is being refresh as the EVCacheNode is not available. " + node.toString());
if(!_disableAsyncRefresh.get()) {
if (node instanceof EVCacheNode) {
final EVCacheNode evcNode = ((EVCacheNode) node);
EVCacheMetricsFactory.getInstance().getCounter(EVCacheMetricsFactory.POOL_REFRESH_ASYNC, evcNode.getTags()).increment();
}
boolean force = (System.currentTimeMillis() - lastReconcileTime) > (manager.getDefaultRefreshInterval().get() * 1000);
if(!force) force = !node.isActive();
refreshPool(true, force);
}
if (duetClientPool != null)
duetClientPool.refreshAsync(node);
}
public void run() {
try {
refresh();
} catch (Throwable t) {
if (log.isDebugEnabled()) log.debug("Error Refreshing EVCache Instance list for " + _appName, t);
}
}
void shutdown() {
if (log.isDebugEnabled()) log.debug("EVCacheClientPool for App : " + _appName + " and Zone : " + _zone + " is being shutdown.");
_shutdown = true;
for(ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
if (log.isDebugEnabled()) log.debug("\nSHUTDOWN\n\tApp : " + _appName + "\n\tServerGroup : " + serverGroup);
serverGroupDisabled(serverGroup);
}
setupMonitoring();
}
private Gauge getConfigGauge(String metric, ServerGroup serverGroup) {
final String name = (serverGroup == null ? metric : metric + serverGroup.getName() + isInWriteOnly(serverGroup));
Gauge gauge = gaugeMap.get(name);
if(gauge != null) return gauge;
final List<Tag> tags = new ArrayList<Tag>(5);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, metric));
if(serverGroup != null) {
tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName()));
}
final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.INTERNAL_POOL_SG_CONFIG, tags);
gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
gaugeMap.put(name, gauge);
return gauge;
}
private Gauge getStatsGauge(String metric, EVCacheClient client) {
final String name = metric + client.getServerGroupName();
Gauge gauge = gaugeMap.get(name);
if(gauge != null) return gauge;
final List<Tag> tags = new ArrayList<Tag>(4);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
tags.add(new BasicTag(EVCacheMetricsFactory.STAT_NAME, metric));
tags.add(new BasicTag(EVCacheMetricsFactory.CONNECTION_ID, String.valueOf(client.getId())));
tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, client.getServerGroupName()));
final Id id = EVCacheMetricsFactory.getInstance().getId(EVCacheMetricsFactory.INTERNAL_STATS, tags);
gauge = EVCacheMetricsFactory.getInstance().getRegistry().gauge(id);
gaugeMap.put(name, gauge);
return gauge;
}
private void incrementFailure(String metric, ServerGroup serverGroup) {
final List<Tag> tags = new ArrayList<Tag>(4);
EVCacheMetricsFactory.getInstance().addAppNameTags(tags, _appName);
tags.add(new BasicTag(EVCacheMetricsFactory.CONFIG_NAME, metric));
tags.add(new BasicTag(EVCacheMetricsFactory.SERVERGROUP, serverGroup.getName()));
EVCacheMetricsFactory.getInstance().increment(EVCacheMetricsFactory.INTERNAL_POOL_INIT_ERROR, tags);
}
private void reportPoolConfig() {
final int size = getPoolSize();
for(ServerGroup key : memcachedInstancesByServerGroup.keySet()) {
getConfigGauge("poolSize", key).set(memcachedInstancesByServerGroup.get(key).size());
final EVCacheClient client = memcachedInstancesByServerGroup.get(key).get(0);
if(client != null) {
getConfigGauge("readTimeout", key).set(getReadTimeout().get());
getConfigGauge("bulkReadTimeout", key).set(getBulkReadTimeout().get());
getConfigGauge("numberOfServerGoups", key).set(memcachedInstancesByServerGroup.size());
getConfigGauge("maxReadQueueLength", key).set(_maxReadQueueSize.get());
getConfigGauge("instanceCount", key).set(client.getMemcachedNodesInZone().size());;
final EVCacheConnectionObserver connectionObserver = client.getConnectionObserver();
if(connectionObserver != null) {
final int activeServerCount = connectionObserver.getActiveServerCount();
final int inActiveServerCount = connectionObserver.getInActiveServerCount();
final int sizeInHashing = client.getNodeLocator().getAll().size();
getConfigGauge("activeServerCount", key).set(Long.valueOf(activeServerCount));
getConfigGauge("activeConnectionCount", key).set(Long.valueOf(activeServerCount * size));
getConfigGauge("inActiveServerCount", key).set(Long.valueOf(inActiveServerCount));
getConfigGauge("sizeInHashing", key).set(Long.valueOf(sizeInHashing));
}
final List<EVCacheClient> readClients = memcachedReadInstancesByServerGroup.get(key);
if (readClients != null && readClients.size() > 0) {
getConfigGauge(EVCacheMetricsFactory.POOL_READ_INSTANCES, key).set(Long.valueOf(readClients.get(0).getConnectionObserver().getActiveServerCount()));
}
final List<EVCacheClient> writeClients = memcachedWriteInstancesByServerGroup.get(key);
if (writeClients != null && writeClients.size() > 0) {
getConfigGauge(EVCacheMetricsFactory.POOL_WRITE_INSTANCES, key).set(Long.valueOf(writeClients.get(0).getConnectionObserver().getActiveServerCount()));
}
}
}
}
private void setupMonitoring() {
try {
final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=" + _appName
+ ",SubGroup=pool");
final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
if (!_shutdown) {
mbeanServer.registerMBean(this, mBeanName);
}
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception", e);
}
}
public int getInstanceCount() {
int instances = 0;
for (ServerGroup serverGroup : memcachedInstancesByServerGroup.keySet()) {
instances += memcachedInstancesByServerGroup.get(serverGroup).get(0).getConnectionObserver().getActiveServerCount();
}
if (duetClientPool != null)
instances += duetClientPool.getInstanceCount();
return instances;
}
public Map<String, String> getInstancesByZone() {
Map<String, String> instanceMap = new HashMap<String, String>();
for (ServerGroup zone : memcachedInstancesByServerGroup.keySet()) {
final List<EVCacheClient> instanceList = memcachedInstancesByServerGroup.get(zone);
instanceMap.put(zone.toString(), instanceList.toString());
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getInstancesByZone());
return instanceMap;
}
public Map<String, Integer> getInstanceCountByZone() {
final Map<String, Integer> instancesByZone = new HashMap<String, Integer>(memcachedInstancesByServerGroup.size() * 2);
for (ServerGroup zone : memcachedInstancesByServerGroup.keySet()) {
instancesByZone.put(zone.getName(), Integer.valueOf(memcachedInstancesByServerGroup.get(zone).get(0).getConnectionObserver().getActiveServerCount()));
}
if (duetClientPool != null)
instancesByZone.putAll(duetClientPool.getInstanceCountByZone());
return instancesByZone;
}
public Map<String, String> getReadZones() {
final Map<String, String> instanceMap = new HashMap<String, String>();
for (ServerGroup key : memcachedReadInstancesByServerGroup.keySet()) {
instanceMap.put(key.getName(), memcachedReadInstancesByServerGroup.get(key).toString());
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getReadZones());
return instanceMap;
}
public Map<String, Integer> getReadInstanceCountByZone() {
final Map<String, Integer> instanceMap = new HashMap<String, Integer>();
for (ServerGroup key : memcachedReadInstancesByServerGroup.keySet()) {
instanceMap.put(key.getName(), Integer.valueOf(memcachedReadInstancesByServerGroup.get(key).get(0)
.getConnectionObserver().getActiveServerCount()));
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getReadInstanceCountByZone());
return instanceMap;
}
public Map<String, String> getWriteZones() {
final Map<String, String> instanceMap = new HashMap<String, String>();
for (ServerGroup key : memcachedWriteInstancesByServerGroup.keySet()) {
instanceMap.put(key.toString(), memcachedWriteInstancesByServerGroup.get(key).toString());
}
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getWriteZones());
return instanceMap;
}
private Map<ServerGroup, List<EVCacheClient>> getAllInstancesByZoneInternal() {
return Collections.unmodifiableMap(memcachedInstancesByServerGroup);
}
public Map<ServerGroup, List<EVCacheClient>> getAllInstancesByZone() {
if (duetClientPool != null) {
Map<ServerGroup, List<EVCacheClient>> allInstanceMap = new ConcurrentHashMap<>();
allInstanceMap.putAll(getAllInstancesByZoneInternal());
allInstanceMap.putAll(duetClientPool.getAllInstancesByZone());
return Collections.unmodifiableMap(allInstanceMap);
}
return getAllInstancesByZoneInternal();
}
Map<ServerGroup, List<EVCacheClient>> getAllInstancesByServerGroupInternal() {
return memcachedInstancesByServerGroup;
}
public Map<ServerGroup, List<EVCacheClient>> getAllInstancesByServerGroup() {
if (duetClientPool == null) {
return getAllInstancesByServerGroupInternal();
}
Map<ServerGroup, List<EVCacheClient>> allInstancesByServerGroup = new ConcurrentHashMap<>();
allInstancesByServerGroup.putAll(getAllInstancesByServerGroupInternal());
allInstancesByServerGroup.putAll(duetClientPool.getAllInstancesByServerGroup());
return allInstancesByServerGroup;
}
private Map<String, Integer> getWriteInstanceCountByZoneInternal() {
final Map<String, Integer> instanceMap = new HashMap<String, Integer>();
for (ServerGroup key : memcachedWriteInstancesByServerGroup.keySet()) {
instanceMap.put(key.toString(), Integer.valueOf(memcachedWriteInstancesByServerGroup.get(key).get(0).getConnectionObserver().getActiveServerCount()));
}
return instanceMap;
}
public Map<String, Integer> getWriteInstanceCountByZone() {
Map<String, Integer> instanceMap = getWriteInstanceCountByZoneInternal();
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getWriteInstanceCountByZone());
return instanceMap;
}
private Map<String, String> getReadServerGroupByZoneInternal() {
final Map<String, String> instanceMap = new HashMap<String, String>();
for (String key : readServerGroupByZone.keySet()) {
instanceMap.put(key, readServerGroupByZone.get(key).toString());
}
return instanceMap;
}
public Map<String, String> getReadServerGroupByZone() {
Map<String, String> instanceMap = getReadServerGroupByZoneInternal();
if (duetClientPool != null)
instanceMap.putAll(duetClientPool.getReadServerGroupByZone());
return instanceMap;
}
public void refreshPool() {
refreshPool(false, true);
if (duetClientPool != null)
duetClientPool.refreshPool(false, true);
}
public void refreshPool(boolean async, boolean force) {
if (log.isDebugEnabled()) log.debug("Refresh Pool : async : " + async + "; force : " + force);
try {
if(async && asyncRefreshExecutor.getQueue().size() == 0) {
asyncRefreshExecutor.submit(new Runnable() {
@Override
public void run() {
try {
refresh(force);
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
});
} else {
refresh(force);
}
} catch (Throwable t) {
if (log.isDebugEnabled()) log.debug("Error Refreshing EVCache Instance list from MBean : " + _appName, t);
}
if (duetClientPool != null)
duetClientPool.refreshPool(async, force);
}
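/*
* Illustrative usage sketch (the pool variable is an assumed, already
* initialized EVCacheClientPool):
*
*   pool.refreshPool(true, true);   // queue an async, forced refresh
*   pool.refreshPool(false, false); // refresh inline, only if instances changed
*/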
public String getFallbackServerGroup() {
if (memcachedFallbackReadInstances.getSize() != 0 || duetClientPool == null)
return memcachedFallbackReadInstances.toString();
return duetClientPool.getFallbackServerGroup();
}
public boolean supportsFallback() {
return memcachedFallbackReadInstances.getSize() > 1 || (duetClientPool != null && duetPrimary.get() && duetClientPool.supportsFallback());
}
public boolean isLogEventEnabled() {
return (logOperations.get() > 0);
}
public boolean shouldLogOperation(String key, String op) {
if (!isLogEventEnabled()) return false;
if (!logOperationCalls.get().contains(op)) return false;
// Use the absolute value so keys with negative hash codes are sampled at the
// configured rate instead of always passing this check.
return Math.abs(key.hashCode() % 1000) <= logOperations.get();
}
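/*
* Illustrative sketch of the sampling above (the value 10 is hypothetical):
* with logOperations set to 10, roughly 1% of keys pass the hash check, and an
* operation is logged only if its name (e.g. "SET") appears in logOperationCalls.
*/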
@Override
public String getLocalServerGroupCircularIterator() {
return (localServerGroupIterator == null) ? (duetClientPool == null ? "NONE" : duetClientPool.getLocalServerGroupCircularIterator()) : localServerGroupIterator.toString();
}
@Override
public String getEVCacheWriteClientsCircularIterator() {
return (allEVCacheWriteClients == null) ? (duetClientPool == null ? "NONE" : duetClientPool.getEVCacheWriteClientsCircularIterator()) : allEVCacheWriteClients.toString();
}
public String getPoolDetails() {
return toString();
}
@Override
public String toString() {
return "\nEVCacheClientPool [\n\t_appName=" + _appName + ",\n\t_zone=" + _zone
+ ",\n\tlocalServerGroupIterator=" + localServerGroupIterator + ",\n\t_poolSize=" + _poolSize + ",\n\t_readTimeout=" + _readTimeout
+ ",\n\t_bulkReadTimeout=" + _bulkReadTimeout + ",\n\tlogOperations=" + logOperations + ",\n\t_opQueueMaxBlockTime=" + _opQueueMaxBlockTime
+ ",\n\t_operationTimeout=" + _operationTimeout + ",\n\t_maxReadQueueSize=" + _maxReadQueueSize + ",\n\t_pingServers=" + _pingServers
+ ",\n\twriteOnlyFastPropertyMap=" + writeOnlyFastPropertyMap + ",\n\tnumberOfModOps=" + numberOfModOps.get() + ",\n\t_shutdown=" + _shutdown
+ ",\n\tmemcachedInstancesByServerGroup=" + memcachedInstancesByServerGroup + ",\n\tmemcachedReadInstancesByServerGroup=" + memcachedReadInstancesByServerGroup
+ ",\n\tmemcachedWriteInstancesByServerGroup=" + memcachedWriteInstancesByServerGroup + ",\n\treadServerGroupByZone=" + readServerGroupByZone
+ ",\n\tmemcachedFallbackReadInstances=" + memcachedFallbackReadInstances + "\n]"
+ ", \n\tallEVCacheWriteClients=" + allEVCacheWriteClients
+ "\n]" + (duetClientPool == null ? "" : duetClientPool.toString());
}
public int getPoolSize() {
return _poolSize.get() + (duetClientPool == null ? 0 : duetClientPool.getPoolSize());
}
public Property<Integer> getLogOperations() {
return logOperations;
}
public Property<Integer> getOpQueueMaxBlockTime() {
return _opQueueMaxBlockTime;
}
public Property<Integer> getOperationTimeout() {
if (duetClientPool !=null && duetPrimary.get()) {
return duetClientPool.getOperationTimeout();
}
return _operationTimeout;
}
public Property<Integer> getMaxReadQueueSize() {
return _maxReadQueueSize;
}
public Property<Boolean> getPingServers() {
return _pingServers;
}
public long getNumberOfModOps() {
return numberOfModOps.get();
}
public boolean isShutdown() {
return _shutdown;
}
public String getZone() {
return this._zone;
}
public String getAppName() {
return this._appName;
}
public EVCacheClientPoolManager getEVCacheClientPoolManager() {
return this.manager;
}
public Map<ServerGroup, Property<Boolean>> getWriteOnlyFastPropertyMap() {
if (duetClientPool != null) {
Map<ServerGroup, Property<Boolean>> allMap = new ConcurrentHashMap<>();
allMap.putAll(writeOnlyFastPropertyMap);
allMap.putAll(duetClientPool.getWriteOnlyFastPropertyMap());
return Collections.unmodifiableMap(allMap);
}
return Collections.unmodifiableMap(writeOnlyFastPropertyMap);
}
public Property<Integer> getReadTimeout() {
if (duetClientPool != null && duetPrimary.get()) {
return duetClientPool.getReadTimeout();
}
return _readTimeout;
}
public Property<Integer> getBulkReadTimeout() {
return _bulkReadTimeout;
}
/*
* This method is helpful in cases where there is typically a large backlog of work queued up and it is
* expensive to lose all that work when a client is shut down.
* Blocks the calling thread until all the queues are drained or at most 30 seconds have elapsed.
* Returns the count of items left in the queues; 0 means none are left.
*/
public int join() {
int size = 0;
int counter = 0;
do {
// Recompute the outstanding queue sizes on every pass; without this reset
// the total would only ever grow and the loop could never terminate before
// the 30 second cap, even after the queues drained.
size = 0;
for(List<EVCacheClient> clientList : getAllInstancesByServerGroup().values()) {
for(EVCacheClient client : clientList) {
size += client.getWriteQueueLength();
size += client.getReadQueueLength();
}
}
if(size > 0) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
log.error("Interrupted while waiting for the queues to drain", e);
Thread.currentThread().interrupt();
break;
}
}
if(counter++ > 3000) break;
} while(size > 0);
return size;
}
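/*
* Illustrative usage sketch (pool is an assumed, already initialized
* EVCacheClientPool): callers that want to drain pending work before a
* shutdown could do
*
*   int left = pool.join(); // blocks until drained, up to ~30 seconds
*   if (left > 0) log.warn(left + " operations were still queued");
*/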
public long getLastReconcileTime() {
return lastReconcileTime;
}
public Property<Set<String>> getOperationToLog() {
return logOperationCalls;
}
}
| 792 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheClientPoolManager.java
|
package com.netflix.evcache.pool;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import javax.annotation.PreDestroy;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.EVCacheImpl;
import com.netflix.evcache.EVCacheInMemoryCache;
import com.netflix.evcache.connection.ConnectionFactoryBuilder;
import com.netflix.evcache.connection.IConnectionBuilder;
import com.netflix.evcache.event.EVCacheEventListener;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.transcoders.Transcoder;
/**
* A manager that holds Pools for each EVCache app. When this class is
* initialized, all the EVCache apps defined in the property evcache.appsToInit
* will be initialized and added to the pool. If a service knows all the EVCache
* apps it uses, then it can define this property and pass a list of EVCache apps
* that need to be initialized.
*
* An EVCache app can also be initialized by Injecting
* <code>EVCacheClientPoolManager</code> and calling <code>
* initEVCache(<app name>)
* </code>
*
* This typically should be done in the client libraries that need to initialize
* an EVCache app. For Example VHSViewingHistoryLibrary in its initLibrary
* initializes EVCACHE_VH by calling
*
* <pre>
* {@literal @}Inject
* public VHSViewingHistoryLibrary(EVCacheClientPoolManager instance,...) {
* ....
* instance.initEVCache("EVCACHE_VH");
* ...
* }
* </pre>
*
* @author smadappa
*
*/
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings({ "PRMC_POSSIBLY_REDUNDANT_METHOD_CALLS", "DM_CONVERT_CASE", "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" })
@Singleton
public class EVCacheClientPoolManager {
/**
* <b>NOTE : Should be the only static referenced variables</b>
* **/
private static final Logger log = LoggerFactory.getLogger(EVCacheClientPoolManager.class);
private volatile static EVCacheClientPoolManager instance;
private final Property<Integer> defaultReadTimeout;
private final Property<String> logEnabledApps;
private final Property<Integer> defaultRefreshInterval;
private final Map<String, EVCacheClientPool> poolMap = new ConcurrentHashMap<String, EVCacheClientPool>();
private final Map<EVCacheClientPool, ScheduledFuture<?>> scheduledTaskMap = new HashMap<EVCacheClientPool, ScheduledFuture<?>>();
private final EVCacheScheduledExecutor asyncExecutor;
private final EVCacheExecutor syncExecutor;
private final List<EVCacheEventListener> evcacheEventListenerList;
private final IConnectionBuilder connectionFactoryProvider;
private final EVCacheNodeList evcacheNodeList;
private final EVCacheConfig evcConfig;
@Inject
public EVCacheClientPoolManager(IConnectionBuilder connectionFactoryprovider, EVCacheNodeList evcacheNodeList, EVCacheConfig evcConfig) {
instance = this;
this.connectionFactoryProvider = connectionFactoryprovider;
this.evcacheNodeList = evcacheNodeList;
this.evcConfig = evcConfig;
this.evcacheEventListenerList = new CopyOnWriteArrayList<EVCacheEventListener>();
String clientCurrentInstanceId = System.getenv("EC2_INSTANCE_ID");
if(clientCurrentInstanceId == null) clientCurrentInstanceId= System.getenv("NETFLIX_INSTANCE_ID");
if(log.isInfoEnabled()) log.info("\nClient Current InstanceId from env = " + clientCurrentInstanceId);
if(clientCurrentInstanceId == null && EVCacheConfig.getInstance().getPropertyRepository() != null) clientCurrentInstanceId = EVCacheConfig.getInstance().getPropertyRepository().get("EC2_INSTANCE_ID", String.class).orElse(null).get();
if(clientCurrentInstanceId == null && EVCacheConfig.getInstance().getPropertyRepository() != null) clientCurrentInstanceId = EVCacheConfig.getInstance().getPropertyRepository().get("NETFLIX_INSTANCE_ID", String.class).orElse(null).get();
if(clientCurrentInstanceId != null && !clientCurrentInstanceId.equalsIgnoreCase("localhost")) {
this.defaultReadTimeout = EVCacheConfig.getInstance().getPropertyRepository().get("default.read.timeout", Integer.class).orElse(20);
if(log.isInfoEnabled()) log.info("\nClient Current InstanceId = " + clientCurrentInstanceId + " which is probably a cloud location. The default.read.timeout = " + defaultReadTimeout);
} else { //Assuming this is not in cloud so bump up the timeouts
this.defaultReadTimeout = EVCacheConfig.getInstance().getPropertyRepository().get("default.read.timeout", Integer.class).orElse(750);
if(log.isInfoEnabled()) log.info("\n\nClient Current InstanceId = " + clientCurrentInstanceId + ". Probably a non-cloud instance. The default.read.timeout = " + defaultReadTimeout + "\n\n");
}
this.logEnabledApps = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheClientPoolManager.log.apps", String.class).orElse("*");
this.defaultRefreshInterval = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheClientPoolManager.refresh.interval", Integer.class).orElse(60);
this.asyncExecutor = new EVCacheScheduledExecutor(Runtime.getRuntime().availableProcessors(),Runtime.getRuntime().availableProcessors(), 30, TimeUnit.SECONDS, new ThreadPoolExecutor.CallerRunsPolicy(), "scheduled");
asyncExecutor.prestartAllCoreThreads();
this.syncExecutor = new EVCacheExecutor(Runtime.getRuntime().availableProcessors(),Runtime.getRuntime().availableProcessors(), 30, TimeUnit.SECONDS, new ThreadPoolExecutor.CallerRunsPolicy(), "pool");
syncExecutor.prestartAllCoreThreads();
initAtStartup();
}
public IConnectionBuilder getConnectionFactoryProvider() {
return connectionFactoryProvider;
}
public void addEVCacheEventListener(EVCacheEventListener listener) {
this.evcacheEventListenerList.add(listener);
}
public void addEVCacheEventListener(EVCacheEventListener listener, int index) {
if(index < evcacheEventListenerList.size()) {
this.evcacheEventListenerList.add(index, listener);
} else {
this.evcacheEventListenerList.add(listener);
}
}
public void removeEVCacheEventListener(EVCacheEventListener listener) {
this.evcacheEventListenerList.remove(listener);
}
public List<EVCacheEventListener> getEVCacheEventListeners() {
return this.evcacheEventListenerList;
}
public EVCacheConfig getEVCacheConfig() {
return this.evcConfig;
}
/**
* @deprecated Please use dependency injection ({@code @Inject}) to obtain
* {@link EVCacheClientPoolManager}. The use of this can result in
* unintended behavior where you will not be able to talk to evcache
* instances.
*/
@Deprecated
public static EVCacheClientPoolManager getInstance() {
if (instance == null) {
new EVCacheClientPoolManager(new ConnectionFactoryBuilder(), new SimpleNodeListProvider(), EVCacheConfig.getInstance());
if (!EVCacheConfig.getInstance().getPropertyRepository().get("evcache.use.simple.node.list.provider", Boolean.class).orElse(false).get()) {
if(log.isDebugEnabled()) log.debug("Please make sure EVCacheClientPoolManager is injected first. This is not the appropriate way to init EVCacheClientPoolManager."
+ " If you are using simple node list provider please set evcache.use.simple.node.list.provider property to true.", new Exception());
}
}
return instance;
}
public void initAtStartup() {
final String appsToInit = EVCacheConfig.getInstance().getPropertyRepository().get("evcache.appsToInit", String.class).orElse("").get();
if (appsToInit != null && appsToInit.length() > 0) {
final StringTokenizer apps = new StringTokenizer(appsToInit, ",");
while (apps.hasMoreTokens()) {
final String app = getAppName(apps.nextToken());
if (log.isDebugEnabled()) log.debug("Initializing EVCache - " + app);
initEVCache(app);
}
}
}
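/*
* Illustrative configuration sketch (the app names are hypothetical): setting
*
*   evcache.appsToInit=EVCACHE_APP1,EVCACHE_APP2
*
* causes both apps to be initialized when this manager starts up.
*/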
/**
* Initializes the pool for the given EVCache app. If one is already initialized
* for the given app, the existing pool is returned without further work.
*
* @param app
* - name of the evcache app
*/
public final synchronized EVCacheClientPool initEVCache(String app) {
return initEVCache(app, false);
}
public final synchronized EVCacheClientPool initEVCache(String app, boolean isDuet) {
if (app == null || (app = app.trim()).length() == 0) throw new IllegalArgumentException("app name is null or empty");
final String APP = getAppName(app);
if (poolMap.containsKey(APP)) return poolMap.get(APP);
final EVCacheClientPool pool = new EVCacheClientPool(APP, evcacheNodeList, asyncExecutor, this, isDuet);
scheduleRefresh(pool);
poolMap.put(APP, pool);
return pool;
}
private void scheduleRefresh(EVCacheClientPool pool) {
final ScheduledFuture<?> task = asyncExecutor.scheduleWithFixedDelay(pool, 30, defaultRefreshInterval.get(), TimeUnit.SECONDS);
scheduledTaskMap.put(pool, task);
}
/**
* Given the appName, get the EVCacheClientPool. If the pool for the app
* already exists then the existing instance is returned. If not, one will be
* created and returned.
*
* @param _app
* - name of the evcache app
* @return the pool for the given app.
*/
public EVCacheClientPool getEVCacheClientPool(String _app) {
final String app = getAppName(_app);
final EVCacheClientPool evcacheClientPool = poolMap.get(app);
if (evcacheClientPool != null) return evcacheClientPool;
initEVCache(app);
return poolMap.get(app);
}
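/*
* Illustrative usage sketch (the app name is hypothetical): the pool is created
* lazily on first access, so
*
*   EVCacheClientPool pool = manager.getEVCacheClientPool("EVCACHE_APP1");
*
* either returns an existing pool or initializes a new one via initEVCache.
*/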
public Map<String, EVCacheClientPool> getAllEVCacheClientPool() {
return new HashMap<String, EVCacheClientPool>(poolMap);
}
@PreDestroy
public void shutdown() {
asyncExecutor.shutdown();
syncExecutor.shutdown();
for (EVCacheClientPool pool : poolMap.values()) {
pool.shutdown();
}
}
public boolean shouldLog(String appName) {
if ("*".equals(logEnabledApps.get())) return true;
if (logEnabledApps.get().indexOf(appName) != -1) return true;
return false;
}
public Property<Integer> getDefaultReadTimeout() {
return defaultReadTimeout;
}
public Property<Integer> getDefaultRefreshInterval() {
return defaultRefreshInterval;
}
public EVCacheScheduledExecutor getEVCacheScheduledExecutor() {
return asyncExecutor;
}
public EVCacheExecutor getEVCacheExecutor() {
return syncExecutor;
}
private String getAppName(String _app) {
_app = _app.toUpperCase();
Boolean ignoreAlias = EVCacheConfig.getInstance().getPropertyRepository()
.get("EVCacheClientPoolManager." + _app + ".ignoreAlias", Boolean.class)
.orElseGet("EVCacheClientPoolManager.ignoreAlias")
.orElse(false).get();
final String app = ignoreAlias ? _app :
EVCacheConfig.getInstance().getPropertyRepository()
.get("EVCacheClientPoolManager." + _app + ".alias", String.class)
.orElse(_app).get().toUpperCase();
if (log.isDebugEnabled()) log.debug("Original App Name : " + _app + "; Alias App Name : " + app);
if(app != null && app.length() > 0) return app.toUpperCase();
return _app;
}
private WriteLock writeLock = new ReentrantReadWriteLock().writeLock();
private final Map<String, EVCacheInMemoryCache<?>> inMemoryMap = new ConcurrentHashMap<String, EVCacheInMemoryCache<?>>();
@SuppressWarnings("unchecked")
public <T> EVCacheInMemoryCache<T> createInMemoryCache(Transcoder<T> tc, EVCacheImpl impl) {
final String name = impl.getCachePrefix() == null ? impl.getAppName() : impl.getAppName() + impl.getCachePrefix();
EVCacheInMemoryCache<T> cache = (EVCacheInMemoryCache<T>) inMemoryMap.get(name);
if(cache == null) {
writeLock.lock();
try {
// Re-check under the lock so only one cache instance is created per name.
if((cache = getInMemoryCache(name)) == null) {
cache = new EVCacheInMemoryCache<T>(impl.getAppName(), tc, impl);
inMemoryMap.put(name, cache);
}
} finally {
// Always release the lock, even if cache construction throws.
writeLock.unlock();
}
}
return cache;
}
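// Design note: reads go through the ConcurrentHashMap without taking the write
// lock; the lock plus the re-check above only serialize the rare create path,
// so at most one EVCacheInMemoryCache is built per name.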
@SuppressWarnings("unchecked")
public <T> EVCacheInMemoryCache<T> getInMemoryCache(String appName) {
return (EVCacheInMemoryCache<T>) inMemoryMap.get(appName);
}
}
| 793 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheExecutor.java
|
package com.netflix.evcache.pool;
import java.lang.management.ManagementFactory;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.patterns.ThreadPoolMonitor;
public class EVCacheExecutor extends ThreadPoolExecutor implements EVCacheExecutorMBean {
private static final Logger log = LoggerFactory.getLogger(EVCacheExecutor.class);
private final Property<Integer> maxAsyncPoolSize;
private final Property<Integer> coreAsyncPoolSize;
private final String name;
public EVCacheExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, RejectedExecutionHandler handler, String name) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit,
new LinkedBlockingQueue<Runnable>(),
new ThreadFactoryBuilder().setDaemon(true).setNameFormat( "EVCacheExecutor-" + name + "-%d").build(),
handler); // pass the handler through; previously it was accepted but silently ignored
this.name = name;
maxAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheExecutor." + name + ".max.size", Integer.class).orElse(maximumPoolSize);
setMaximumPoolSize(maxAsyncPoolSize.get());
coreAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get("EVCacheExecutor." + name + ".core.size", Integer.class).orElse(corePoolSize);
setCorePoolSize(coreAsyncPoolSize.get());
setKeepAliveTime(keepAliveTime, unit);
maxAsyncPoolSize.subscribe(this::setMaximumPoolSize);
coreAsyncPoolSize.subscribe(i -> {
setCorePoolSize(i);
prestartAllCoreThreads();
});
setupMonitoring(name);
ThreadPoolMonitor.attach(EVCacheMetricsFactory.getInstance().getRegistry(), this, EVCacheMetricsFactory.INTERNAL_EXECUTOR + "-" + name);
}
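/*
* Illustrative configuration sketch (the values are hypothetical): for an
* executor named "pool", as created by EVCacheClientPoolManager, the sizes
* above can be tuned at runtime via
*
*   EVCacheExecutor.pool.core.size=8
*   EVCacheExecutor.pool.max.size=16
*/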
private void setupMonitoring(String name) {
try {
ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name);
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
mbeanServer.registerMBean(this, mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception", e);
}
}
public void shutdown() {
try {
ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name);
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
mbeanServer.unregisterMBean(mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception", e);
}
super.shutdown();
}
@Override
public int getQueueSize() {
return getQueue().size();
}
}
| 794 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheScheduledExecutorMBean.java
|
package com.netflix.evcache.pool;
public interface EVCacheScheduledExecutorMBean {
boolean isShutdown();
boolean isTerminating();
boolean isTerminated();
int getCorePoolSize();
int getMaximumPoolSize();
int getQueueSize();
int getPoolSize();
int getActiveCount();
int getLargestPoolSize();
long getTaskCount();
long getCompletedTaskCount();
}
| 795 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheScheduledExecutor.java
|
package com.netflix.evcache.pool;
import java.lang.management.ManagementFactory;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.metrics.EVCacheMetricsFactory;
import com.netflix.evcache.util.EVCacheConfig;
import com.netflix.spectator.api.patterns.ThreadPoolMonitor;
public class EVCacheScheduledExecutor extends ScheduledThreadPoolExecutor implements EVCacheScheduledExecutorMBean {
private static final Logger log = LoggerFactory.getLogger(EVCacheScheduledExecutor.class);
private final Property<Integer> maxAsyncPoolSize;
private final Property<Integer> coreAsyncPoolSize;
private final String name;
public EVCacheScheduledExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, RejectedExecutionHandler handler, String name) {
super(corePoolSize, handler);
this.name = name;
maxAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get(name + "executor.max.size", Integer.class).orElse(maximumPoolSize);
setMaximumPoolSize(maxAsyncPoolSize.get());
coreAsyncPoolSize = EVCacheConfig.getInstance().getPropertyRepository().get(name + "executor.core.size", Integer.class).orElse(corePoolSize);
setCorePoolSize(coreAsyncPoolSize.get());
setKeepAliveTime(keepAliveTime, unit);
final ThreadFactory asyncFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat( "EVCacheScheduledExecutor-" + name + "-%d").build();
setThreadFactory(asyncFactory);
maxAsyncPoolSize.subscribe(this::setMaximumPoolSize);
coreAsyncPoolSize.subscribe(i -> {
setCorePoolSize(i);
prestartAllCoreThreads();
});
setupMonitoring(name);
ThreadPoolMonitor.attach(EVCacheMetricsFactory.getInstance().getRegistry(), this, EVCacheMetricsFactory.INTERNAL_EXECUTOR_SCHEDULED + "-" + name);
}
private void setupMonitoring(String name) {
try {
ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name);
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
if (mbeanServer.isRegistered(mBeanName)) {
if (log.isDebugEnabled()) log.debug("MBEAN with name " + mBeanName + " has been registered. Will unregister the previous instance and register a new one.");
mbeanServer.unregisterMBean(mBeanName);
}
mbeanServer.registerMBean(this, mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception", e);
}
}
public void shutdown() {
try {
ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup="+name);
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
mbeanServer.unregisterMBean(mBeanName);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Exception", e);
}
super.shutdown();
}
@Override
public int getQueueSize() {
return getQueue().size();
}
}
| 796 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheExecutorMBean.java
|
package com.netflix.evcache.pool;
public interface EVCacheExecutorMBean {
boolean isShutdown();
boolean isTerminating();
boolean isTerminated();
int getCorePoolSize();
int getMaximumPoolSize();
int getQueueSize();
int getPoolSize();
int getActiveCount();
int getLargestPoolSize();
long getTaskCount();
long getCompletedTaskCount();
}
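
A hedged sketch of reading these attributes over JMX, assuming the pool registered itself under the same ObjectName pattern used by EVCacheScheduledExecutor above ("com.netflix.evcache:Group=ThreadPool,SubGroup=<name>"); the pool name passed in is hypothetical:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ExecutorMBeanReader {
    public static void printPoolStats(String name) throws Exception {
        final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        final ObjectName mBeanName = ObjectName.getInstance("com.netflix.evcache:Group=ThreadPool,SubGroup=" + name);
        // Standard MBean attribute names are derived from the getter names in the interface above.
        System.out.println("active threads : " + server.getAttribute(mBeanName, "ActiveCount"));
        System.out.println("queued tasks   : " + server.getAttribute(mBeanName, "QueueSize"));
        System.out.println("completed tasks: " + server.getAttribute(mBeanName, "CompletedTaskCount"));
    }
}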
| 797 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/EVCacheKetamaNodeLocatorConfiguration.java
|
package com.netflix.evcache.pool;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.HashMap;
import java.util.Map;
import com.netflix.archaius.api.Property;
import com.netflix.evcache.util.EVCacheConfig;
import net.spy.memcached.MemcachedNode;
import net.spy.memcached.util.DefaultKetamaNodeLocatorConfiguration;
public class EVCacheKetamaNodeLocatorConfiguration extends DefaultKetamaNodeLocatorConfiguration {
protected final EVCacheClient client;
protected final Property<Integer> bucketSize;
protected final Map<MemcachedNode, String> socketAddresses = new HashMap<MemcachedNode, String>();
public EVCacheKetamaNodeLocatorConfiguration(EVCacheClient client) {
this.client = client;
this.bucketSize = EVCacheConfig.getInstance().getPropertyRepository().get(client.getAppName() + "." + client.getServerGroupName() + ".bucket.size", Integer.class)
        .orElseGet(client.getAppName() + ".bucket.size").orElse(super.getNodeRepetitions());
}
/**
 * Returns the number of discrete hashes that should be defined for each
 * node in the continuum.
 *
 * @return the configured bucket size, defaulting to the repetition count from DefaultKetamaNodeLocatorConfiguration.
 */
public int getNodeRepetitions() {
return bucketSize.get().intValue();
}
/**
 * Returns the key for a given MemcachedNode and repetition, based on the node's socket address.
 *
 * @param node - the MemcachedNode we are interested in
 * @param repetition - the repetition number for the node (0 is the first repetition)
 * @return the node's socket address suffixed with the repetition; the address takes one of the following forms:
 *         For EC2-Classic instances - "publicHostname/privateIp:port" (e.g. ec2-174-129-159-31.compute-1.amazonaws.com/10.125.47.114:11211)
 *         For EC2 VPC instances - "privateIp/privateIp:port" (e.g. 10.125.47.114/10.125.47.114:11211)
 *         privateIp is also known as the local IP
 */
@Override
public String getKeyForNode(MemcachedNode node, int repetition) {
String result = socketAddresses.get(node);
if(result == null) {
final SocketAddress socketAddress = node.getSocketAddress();
if(socketAddress instanceof InetSocketAddress) {
final InetSocketAddress isa = (InetSocketAddress)socketAddress;
result = isa.getHostName() + '/' + isa.getAddress().getHostAddress() + ":11211";
} else {
result=String.valueOf(socketAddress);
if (result.startsWith("/")) {
result = result.substring(1);
}
}
socketAddresses.put(node, result);
}
return result + "-" + repetition;
}
@Override
public String toString() {
return "EVCacheKetamaNodeLocatorConfiguration [EVCacheClient=" + client + ", BucketSize=" + getNodeRepetitions() + "]";
}
}
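
A small illustration (not part of the EVCache source) of the key format produced by getKeyForNode above, applied directly to an InetSocketAddress; the address and repetition count are made up:

import java.net.InetSocketAddress;

public class KetamaKeyFormatSample {
    public static void main(String[] args) {
        final InetSocketAddress isa = new InetSocketAddress("10.125.47.114", 11211);
        final String base = isa.getHostName() + '/' + isa.getAddress().getHostAddress() + ":11211";
        for (int repetition = 0; repetition < 3; repetition++) {
            // Prints "<host>/<ip>:11211-<repetition>", the keys hashed onto the ketama ring.
            System.out.println(base + "-" + repetition);
        }
    }
}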
| 798 |
0 |
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache
|
Create_ds/EVCache/evcache-core/src/main/java/com/netflix/evcache/pool/ChunkTranscoder.java
|
package com.netflix.evcache.pool;
import net.spy.memcached.CachedData;
import net.spy.memcached.transcoders.BaseSerializingTranscoder;
import net.spy.memcached.transcoders.Transcoder;
/**
 * A pass-through transcoder used only by the EVCache client to ensure chunked data is never deserialized.
 *
 * @author smadappa
 */
public class ChunkTranscoder extends BaseSerializingTranscoder implements Transcoder<CachedData> {
public ChunkTranscoder() {
super(Integer.MAX_VALUE);
}
// Chunks are never decoded asynchronously; they are handed back as-is.
@Override
public boolean asyncDecode(CachedData d) {
    return false;
}
// Pass the raw chunk through untouched on reads.
@Override
public CachedData decode(CachedData d) {
    return d;
}
// Pass the raw chunk through untouched on writes.
@Override
public CachedData encode(CachedData o) {
    return o;
}
@Override
public int getMaxSize() {
    return Integer.MAX_VALUE;
}
}
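
A hedged usage sketch: fetching a value as raw CachedData with ChunkTranscoder so that chunked entries come back as-is instead of being deserialized; the memcached address and key are hypothetical:

import java.net.InetSocketAddress;
import net.spy.memcached.CachedData;
import net.spy.memcached.MemcachedClient;

public class ChunkTranscoderSample {
    public static void main(String[] args) throws Exception {
        final MemcachedClient client = new MemcachedClient(new InetSocketAddress("localhost", 11211));
        // get(key, transcoder) returns the stored flags and bytes without deserializing them
        final CachedData raw = client.get("some-chunked-key", new ChunkTranscoder());
        if (raw != null) {
            System.out.println("flags=" + raw.getFlags() + ", bytes=" + raw.getData().length);
        }
        client.shutdown();
    }
}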
| 799 |